--- /dev/null
+//! Test that monitor update failures don't get our channel state out of sync.
+//! One of the biggest concerns with the monitor update failure handling code is that messages
+//! resent after monitor updating is restored are delivered out-of-order, resulting in
+//! commitment_signed messages having "invalid signatures".
+//! To test this we stand up a network of three nodes and read bytes from the fuzz input to denote
+//! actions such as sending payments, handling events, or changing monitor update return values on
+//! a per-node basis. This should allow it to find any cases where the ordering of actions results
+//! in us getting out of sync with ourselves, and, assuming at least one of our receive- or
+//! send-side handlers is correct, with other peers. We consider it a failure if any action
+//! results in a channel being force-closed.
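+//!
+//! Roughly, the interpreter loop below maps each input byte to an action: 0x00-0x05 set a
+//! node's monitor update result to failure or success, 0x06-0x08 restore a node's channel
+//! monitor, 0x09-0x0e send payments over the various routes, 0x0f-0x12 disconnect and
+//! reconnect peers, 0x13-0x1e deliver pending messages and process events (optionally
+//! corrupting forwarded HTLCs or failing received payments), and 0x1f-0x21 serialize a node
+//! and reload it from its last-serialized state. Any other byte ends the test.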
+
+//Uncomment this for libfuzzer builds:
+//#![no_main]
+
+extern crate bitcoin;
+extern crate bitcoin_hashes;
+extern crate lightning;
+extern crate secp256k1;
+
+use bitcoin::BitcoinHash;
+use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::transaction::{Transaction, TxOut};
+use bitcoin::blockdata::script::{Builder, Script};
+use bitcoin::blockdata::opcodes;
+use bitcoin::network::constants::Network;
+
+use bitcoin_hashes::Hash as TraitImport;
+use bitcoin_hashes::hash160::Hash as Hash160;
+use bitcoin_hashes::sha256::Hash as Sha256;
+use bitcoin_hashes::sha256d::Hash as Sha256d;
+
+use lightning::chain::chaininterface;
+use lightning::chain::transaction::OutPoint;
+use lightning::chain::chaininterface::{BroadcasterInterface,ConfirmationTarget,ChainListener,FeeEstimator,ChainWatchInterfaceUtil};
+use lightning::chain::keysinterface::{ChannelKeys, KeysInterface};
+use lightning::ln::channelmonitor;
+use lightning::ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, HTLCUpdate};
+use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage, ChannelManagerReadArgs};
+use lightning::ln::router::{Route, RouteHop};
+use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, ErrorAction, LightningError, UpdateAddHTLC, LocalFeatures};
+use lightning::util::events;
+use lightning::util::logger::Logger;
+use lightning::util::config::UserConfig;
+use lightning::util::events::{EventsProvider, MessageSendEventsProvider};
+use lightning::util::ser::{Readable, ReadableArgs, Writeable, Writer};
+
+mod utils;
+use utils::test_logger;
+
+use secp256k1::key::{PublicKey,SecretKey};
+use secp256k1::Secp256k1;
+
+use std::mem;
+use std::cmp::Ordering;
+use std::collections::{HashSet, hash_map, HashMap};
+use std::sync::{Arc,Mutex};
+use std::sync::atomic;
+use std::io::Cursor;
+
+struct FuzzEstimator {}
+impl FeeEstimator for FuzzEstimator {
+ fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u64 {
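+ // Keep fees deterministic so the interesting behavior comes from monitor updates, not
+ // fee changes; 253 sat per 1000 weight is (roughly) the 1 sat/vbyte relay floor.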
+ 253
+ }
+}
+
+pub struct TestBroadcaster {}
+impl BroadcasterInterface for TestBroadcaster {
+ fn broadcast_transaction(&self, _tx: &Transaction) { }
+}
+
+pub struct VecWriter(pub Vec<u8>);
+impl Writer for VecWriter {
+ fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+ self.0.extend_from_slice(buf);
+ Ok(())
+ }
+ fn size_hint(&mut self, size: usize) {
+ self.0.reserve_exact(size);
+ }
+}
+
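+// Set while a test_restore_channel_monitor call is in flight (actions 0x06-0x08 below) so that
+// add_update_monitor can distinguish restore-triggered updates from normal ones. do_test is
+// driven from a single thread, so the unsafe accesses never race.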
+static mut IN_RESTORE: bool = false;
+pub struct TestChannelMonitor {
+ pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint>>,
+ pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
+ pub latest_good_update: Mutex<HashMap<OutPoint, Vec<u8>>>,
+ pub latest_update_good: Mutex<HashMap<OutPoint, bool>>,
+ pub latest_updates_good_at_last_ser: Mutex<HashMap<OutPoint, bool>>,
+ pub should_update_manager: atomic::AtomicBool,
+}
+impl TestChannelMonitor {
+ pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: Arc<chaininterface::BroadcasterInterface>, logger: Arc<Logger>, feeest: Arc<chaininterface::FeeEstimator>) -> Self {
+ Self {
+ simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, feeest),
+ update_ret: Mutex::new(Ok(())),
+ latest_good_update: Mutex::new(HashMap::new()),
+ latest_update_good: Mutex::new(HashMap::new()),
+ latest_updates_good_at_last_ser: Mutex::new(HashMap::new()),
+ should_update_manager: atomic::AtomicBool::new(false),
+ }
+ }
+}
+impl channelmonitor::ManyChannelMonitor for TestChannelMonitor {
+ fn add_update_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+ let ret = self.update_ret.lock().unwrap().clone();
+ if let Ok(()) = ret {
+ let mut ser = VecWriter(Vec::new());
+ monitor.write_for_disk(&mut ser).unwrap();
+ self.latest_good_update.lock().unwrap().insert(funding_txo, ser.0);
+ match self.latest_update_good.lock().unwrap().entry(funding_txo) {
+ hash_map::Entry::Vacant(e) => { e.insert(true); },
+ hash_map::Entry::Occupied(mut e) => {
+ if !e.get() && unsafe { IN_RESTORE } {
+ // Technically we can't consider an update to be "good" unless we're doing
+ // it in response to a test_restore_channel_monitor as the channel may
+ // still be waiting on such a call, so only set us to good if we're in the
+ // middle of a restore call.
+ e.insert(true);
+ }
+ },
+ }
+ self.should_update_manager.store(true, atomic::Ordering::Relaxed);
+ } else {
+ self.latest_update_good.lock().unwrap().insert(funding_txo, false);
+ }
+ assert!(self.simple_monitor.add_update_monitor(funding_txo, monitor).is_ok());
+ ret
+ }
+
+ fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
+ return self.simple_monitor.fetch_pending_htlc_updated();
+ }
+}
+
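+// All key material is derived from fixed byte patterns ending in a type tag plus the node_id
+// (and a per-call counter where needed), keeping node state fully deterministic per run.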
+struct KeyProvider {
+ node_id: u8,
+ session_id: atomic::AtomicU8,
+ channel_id: atomic::AtomicU8,
+}
+impl KeysInterface for KeyProvider {
+ fn get_node_secret(&self) -> SecretKey {
+ SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, self.node_id]).unwrap()
+ }
+
+ fn get_destination_script(&self) -> Script {
+ let secp_ctx = Secp256k1::signing_only();
+ let channel_monitor_claim_key = SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, self.node_id]).unwrap();
+ let our_channel_monitor_claim_key_hash = Hash160::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
+ Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
+ }
+
+ fn get_shutdown_pubkey(&self) -> PublicKey {
+ let secp_ctx = Secp256k1::signing_only();
+ PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, self.node_id]).unwrap())
+ }
+
+ fn get_channel_keys(&self, _inbound: bool) -> ChannelKeys {
+ ChannelKeys {
+ funding_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, self.node_id]).unwrap(),
+ revocation_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, self.node_id]).unwrap(),
+ payment_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, self.node_id]).unwrap(),
+ delayed_payment_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, self.node_id]).unwrap(),
+ htlc_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, self.node_id]).unwrap(),
+ commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, self.node_id],
+ }
+ }
+
+ fn get_session_key(&self) -> SecretKey {
+ let id = self.session_id.fetch_add(1, atomic::Ordering::Relaxed);
+ SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 10, self.node_id]).unwrap()
+ }
+
+ fn get_channel_id(&self) -> [u8; 32] {
+ let id = self.channel_id.fetch_add(1, atomic::Ordering::Relaxed);
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, id, 11, self.node_id]
+ }
+}
+
+#[inline]
+pub fn do_test(data: &[u8]) {
+ let fee_est = Arc::new(FuzzEstimator{});
+ let broadcast = Arc::new(TestBroadcaster{});
+
+ macro_rules! make_node {
+ ($node_id: expr) => { {
+ let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string()));
+ let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin, Arc::clone(&logger)));
+ let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));
+
+ let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
+ let mut config = UserConfig::new();
+ config.channel_options.fee_proportional_millionths = 0;
+ config.channel_options.announced_channel = true;
+ config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
+ (ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), watch.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap(),
+ monitor)
+ } }
+ }
+
+ macro_rules! reload_node {
+ ($ser: expr, $node_id: expr, $old_monitors: expr) => { {
+ let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new($node_id.to_string()));
+ let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin, Arc::clone(&logger)));
+ let monitor = Arc::new(TestChannelMonitor::new(watch.clone(), broadcast.clone(), logger.clone(), fee_est.clone()));
+
+ let keys_manager = Arc::new(KeyProvider { node_id: $node_id, session_id: atomic::AtomicU8::new(0), channel_id: atomic::AtomicU8::new(0) });
+ let mut config = UserConfig::new();
+ config.channel_options.fee_proportional_millionths = 0;
+ config.channel_options.announced_channel = true;
+ config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
+
+ let mut monitors = HashMap::new();
+ let mut old_monitors = $old_monitors.latest_good_update.lock().unwrap();
+ for (outpoint, monitor_ser) in old_monitors.drain() {
+ monitors.insert(outpoint, <(Sha256d, ChannelMonitor)>::read(&mut Cursor::new(&monitor_ser), Arc::clone(&logger)).expect("Failed to read monitor").1);
+ monitor.latest_good_update.lock().unwrap().insert(outpoint, monitor_ser);
+ }
+ let mut monitor_refs = HashMap::new();
+ for (outpoint, monitor) in monitors.iter() {
+ monitor_refs.insert(*outpoint, monitor);
+ }
+
+ let read_args = ChannelManagerReadArgs {
+ keys_manager,
+ fee_estimator: fee_est.clone(),
+ monitor: monitor.clone(),
+ chain_monitor: watch,
+ tx_broadcaster: broadcast.clone(),
+ logger,
+ default_config: config,
+ channel_monitors: &monitor_refs,
+ };
+
+ let res = (<(Sha256d, ChannelManager)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, monitor);
+ for (_, was_good) in $old_monitors.latest_updates_good_at_last_ser.lock().unwrap().iter() {
+ if !was_good {
+ // If the last time we updated a monitor we didn't successfully update (and we
+ // have since updated our serialized copy of the ChannelManager) we may
+ // force-close the channel on our counterparty because we know we're missing
+ // something. Thus, we just return here since we can't continue to test.
+ return;
+ }
+ }
+ res
+ } }
+ }
+
+
+ let mut channel_txn = Vec::new();
+ macro_rules! make_channel {
+ ($source: expr, $dest: expr, $chan_id: expr) => { {
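+ // Drive the full open_channel -> accept_channel -> funding_created -> funding_signed
+ // handshake by hand, pulling each message out of the sender's pending message events.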
+ $source.create_channel($dest.get_our_node_id(), 10000000, 42, 0).unwrap();
+ let open_channel = {
+ let events = $source.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ if let events::MessageSendEvent::SendOpenChannel { ref msg, .. } = events[0] {
+ msg.clone()
+ } else { panic!("Wrong event type"); }
+ };
+
+ $dest.handle_open_channel(&$source.get_our_node_id(), LocalFeatures::new(), &open_channel).unwrap();
+ let accept_channel = {
+ let events = $dest.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ if let events::MessageSendEvent::SendAcceptChannel { ref msg, .. } = events[0] {
+ msg.clone()
+ } else { panic!("Wrong event type"); }
+ };
+
+ $source.handle_accept_channel(&$dest.get_our_node_id(), LocalFeatures::new(), &accept_channel).unwrap();
+ {
+ let events = $source.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ if let events::Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, .. } = events[0] {
+ let tx = Transaction { version: $chan_id, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ value: *channel_value_satoshis, script_pubkey: output_script.clone(),
+ }]};
+ let funding_output = OutPoint::new(tx.txid(), 0);
+ $source.funding_transaction_generated(&temporary_channel_id, funding_output);
+ channel_txn.push(tx);
+ } else { panic!("Wrong event type"); }
+ }
+
+ let funding_created = {
+ let events = $source.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ if let events::MessageSendEvent::SendFundingCreated { ref msg, .. } = events[0] {
+ msg.clone()
+ } else { panic!("Wrong event type"); }
+ };
+ $dest.handle_funding_created(&$source.get_our_node_id(), &funding_created).unwrap();
+
+ let funding_signed = {
+ let events = $dest.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ if let events::MessageSendEvent::SendFundingSigned { ref msg, .. } = events[0] {
+ msg.clone()
+ } else { panic!("Wrong event type"); }
+ };
+ $source.handle_funding_signed(&$dest.get_our_node_id(), &funding_signed).unwrap();
+
+ {
+ let events = $source.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ if let events::Event::FundingBroadcastSafe { .. } = events[0] {
+ } else { panic!("Wrong event type"); }
+ }
+ } }
+ }
+
+ macro_rules! confirm_txn {
+ ($node: expr) => { {
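+ // Mine all the funding transactions in block 1, then bury them under another ~98
+ // blocks so both ends consider the channels deeply confirmed.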
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let mut txn = Vec::with_capacity(channel_txn.len());
+ let mut posn = Vec::with_capacity(channel_txn.len());
+ for i in 0..channel_txn.len() {
+ txn.push(&channel_txn[i]);
+ posn.push(i as u32 + 1);
+ }
+ $node.block_connected(&header, 1, &txn, &posn);
+ for i in 2..100 {
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ $node.block_connected(&header, i, &Vec::new(), &[0; 0]);
+ }
+ } }
+ }
+
+ macro_rules! lock_fundings {
+ ($nodes: expr) => { {
+ let mut node_events = Vec::new();
+ for node in $nodes.iter() {
+ node_events.push(node.get_and_clear_pending_msg_events());
+ }
+ for (idx, node_event) in node_events.iter().enumerate() {
+ for event in node_event {
+ if let events::MessageSendEvent::SendFundingLocked { ref node_id, ref msg } = event {
+ for node in $nodes.iter() {
+ if node.get_our_node_id() == *node_id {
+ node.handle_funding_locked(&$nodes[idx].get_our_node_id(), msg).unwrap();
+ }
+ }
+ } else { panic!("Wrong event type"); }
+ }
+ }
+
+ for node in $nodes.iter() {
+ let events = node.get_and_clear_pending_msg_events();
+ for event in events {
+ if let events::MessageSendEvent::SendAnnouncementSignatures { .. } = event {
+ } else { panic!("Wrong event type"); }
+ }
+ }
+ } }
+ }
+
+ // 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest
+ // forwarding.
+ let (mut node_a, mut monitor_a) = make_node!(0);
+ let (mut node_b, mut monitor_b) = make_node!(1);
+ let (mut node_c, mut monitor_c) = make_node!(2);
+
+ let mut nodes = [node_a, node_b, node_c];
+
+ make_channel!(nodes[0], nodes[1], 0);
+ make_channel!(nodes[1], nodes[2], 1);
+
+ for node in nodes.iter() {
+ confirm_txn!(node);
+ }
+
+ lock_fundings!(nodes);
+
+ let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap();
+ let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap();
+
+ let mut payment_id = 0;
+
+ let mut chan_a_disconnected = false;
+ let mut chan_b_disconnected = false;
+ let mut ba_events = Vec::new();
+ let mut bc_events = Vec::new();
+
+ let mut node_a_ser = VecWriter(Vec::new());
+ nodes[0].write(&mut node_a_ser).unwrap();
+ let mut node_b_ser = VecWriter(Vec::new());
+ nodes[1].write(&mut node_b_ser).unwrap();
+ let mut node_c_ser = VecWriter(Vec::new());
+ nodes[2].write(&mut node_c_ser).unwrap();
+
+ macro_rules! test_err {
+ ($res: expr) => {
+ match $res {
+ Ok(()) => {},
+ Err(LightningError { action: ErrorAction::IgnoreError, .. }) => { },
+ _ => { $res.unwrap() },
+ }
+ }
+ }
+
+ macro_rules! test_return {
+ () => { {
+ assert_eq!(nodes[0].list_channels().len(), 1);
+ assert_eq!(nodes[1].list_channels().len(), 2);
+ assert_eq!(nodes[2].list_channels().len(), 1);
+ return;
+ } }
+ }
+
+ let mut read_pos = 0;
+ macro_rules! get_slice {
+ ($len: expr) => {
+ {
+ let slice_len = $len as usize;
+ if data.len() < read_pos + slice_len {
+ test_return!();
+ }
+ read_pos += slice_len;
+ &data[read_pos - slice_len..read_pos]
+ }
+ }
+ }
+
+ loop {
+ macro_rules! send_payment {
+ ($source: expr, $dest: expr) => { {
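+ // Build a one-hop route by hand (no Router in this target), with a payment hash
+ // derived from a wrapping u8 counter so hashes repeat only after 256 payments.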
+ let payment_hash = Sha256::hash(&[payment_id; 1]);
+ payment_id = payment_id.wrapping_add(1);
+ if let Err(_) = $source.send_payment(Route {
+ hops: vec![RouteHop {
+ pubkey: $dest.0.get_our_node_id(),
+ short_channel_id: $dest.1,
+ fee_msat: 5000000,
+ cltv_expiry_delta: 200,
+ }],
+ }, PaymentHash(payment_hash.into_inner())) {
+ // Probably ran out of funds
+ test_return!();
+ }
+ } };
+ ($source: expr, $middle: expr, $dest: expr) => { {
+ let payment_hash = Sha256::hash(&[payment_id; 1]);
+ payment_id = payment_id.wrapping_add(1);
+ if let Err(_) = $source.send_payment(Route {
+ hops: vec![RouteHop {
+ pubkey: $middle.0.get_our_node_id(),
+ short_channel_id: $middle.1,
+ fee_msat: 50000,
+ cltv_expiry_delta: 100,
+ },RouteHop {
+ pubkey: $dest.0.get_our_node_id(),
+ short_channel_id: $dest.1,
+ fee_msat: 5000000,
+ cltv_expiry_delta: 200,
+ }],
+ }, PaymentHash(payment_hash.into_inner())) {
+ // Probably ran out of funds
+ test_return!();
+ }
+ } }
+ }
+
+ macro_rules! process_msg_events {
+ ($node: expr, $corrupt_forward: expr) => { {
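+ // For node 1 (the middle node), first replay any events we queued up in
+ // ba_events/bc_events while one of its peers was disconnected, then handle its
+ // freshly-generated message events.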
+ let events = if $node == 1 {
+ let mut new_events = Vec::new();
+ mem::swap(&mut new_events, &mut ba_events);
+ new_events.extend_from_slice(&bc_events[..]);
+ bc_events.clear();
+ new_events
+ } else { Vec::new() };
+ for event in events.iter().chain(nodes[$node].get_and_clear_pending_msg_events().iter()) {
+ match event {
+ events::MessageSendEvent::UpdateHTLCs { ref node_id, updates: CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+ for dest in nodes.iter() {
+ if dest.get_our_node_id() == *node_id {
+ assert!(update_fee.is_none());
+ for update_add in update_add_htlcs {
+ if !$corrupt_forward {
+ test_err!(dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &update_add));
+ } else {
+ // Corrupt the update_add_htlc message so that its HMAC
+ // check will fail and we generate a
+ // update_fail_malformed_htlc instead of an
+ // update_fail_htlc as we do when we reject a payment.
+ let mut msg_ser = update_add.encode();
+ msg_ser[1000] ^= 0xff;
+ let new_msg = UpdateAddHTLC::read(&mut Cursor::new(&msg_ser)).unwrap();
+ test_err!(dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), &new_msg));
+ }
+ }
+ for update_fulfill in update_fulfill_htlcs {
+ test_err!(dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), &update_fulfill));
+ }
+ for update_fail in update_fail_htlcs {
+ test_err!(dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), &update_fail));
+ }
+ for update_fail_malformed in update_fail_malformed_htlcs {
+ test_err!(dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), &update_fail_malformed));
+ }
+ test_err!(dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed));
+ }
+ }
+ },
+ events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ for dest in nodes.iter() {
+ if dest.get_our_node_id() == *node_id {
+ test_err!(dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg));
+ }
+ }
+ },
+ events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
+ for dest in nodes.iter() {
+ if dest.get_our_node_id() == *node_id {
+ test_err!(dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg));
+ }
+ }
+ },
+ events::MessageSendEvent::SendFundingLocked { .. } => {
+ // Can be generated as a reestablish response
+ },
+ events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {
+ // Can be generated due to a payment forward being rejected due to a
+ // channel having previously failed a monitor update
+ },
+ _ => panic!("Unhandled message event"),
+ }
+ }
+ } }
+ }
+
+ macro_rules! drain_msg_events_on_disconnect {
+ ($counterparty_id: expr) => { {
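+ // Throw away the disconnected pair's pending events, but park node 1's messages
+ // destined for the still-connected side in ba_events/bc_events for later replay.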
+ if $counterparty_id == 0 {
+ for event in nodes[0].get_and_clear_pending_msg_events() {
+ match event {
+ events::MessageSendEvent::UpdateHTLCs { .. } => {},
+ events::MessageSendEvent::SendRevokeAndACK { .. } => {},
+ events::MessageSendEvent::SendChannelReestablish { .. } => {},
+ events::MessageSendEvent::SendFundingLocked { .. } => {},
+ events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+ _ => panic!("Unhandled message event"),
+ }
+ }
+ ba_events.clear();
+ } else {
+ for event in nodes[2].get_and_clear_pending_msg_events() {
+ match event {
+ events::MessageSendEvent::UpdateHTLCs { .. } => {},
+ events::MessageSendEvent::SendRevokeAndACK { .. } => {},
+ events::MessageSendEvent::SendChannelReestablish { .. } => {},
+ events::MessageSendEvent::SendFundingLocked { .. } => {},
+ events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+ _ => panic!("Unhandled message event"),
+ }
+ }
+ bc_events.clear();
+ }
+ let mut events = nodes[1].get_and_clear_pending_msg_events();
+ let drop_node_id = if $counterparty_id == 0 { nodes[0].get_our_node_id() } else { nodes[2].get_our_node_id() };
+ let msg_sink = if $counterparty_id == 0 { &mut bc_events } else { &mut ba_events };
+ for event in events.drain(..) {
+ let push = match event {
+ events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => {
+ if *node_id != drop_node_id { true } else { false }
+ },
+ events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => {
+ if *node_id != drop_node_id { true } else { false }
+ },
+ events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => {
+ if *node_id != drop_node_id { true } else { false }
+ },
+ events::MessageSendEvent::SendFundingLocked { .. } => false,
+ events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => false,
+ _ => panic!("Unhandled message event"),
+ };
+ if push { msg_sink.push(event); }
+ }
+ } }
+ }
+
+ macro_rules! process_events {
+ ($node: expr, $fail: expr) => { {
+ // In case we get 256 payments we may have a hash collision, resulting in the
+ // second claim/fail call not finding the duplicate-hash HTLC, so we have to
+ // deduplicate the calls here.
+ let mut claim_set = HashSet::new();
+ let mut events = nodes[$node].get_and_clear_pending_events();
+ // Sort events so that PendingHTLCsForwardable get processed last. This avoids a
+ // case where we first process a PendingHTLCsForwardable, then claim/fail on a
+ // PaymentReceived, claiming/failing two HTLCs, but leaving a just-generated
+ // PaymentReceived event for the second HTLC in our pending_events (and breaking
+ // our claim_set deduplication).
+ events.sort_by(|a, b| {
+ if let events::Event::PaymentReceived { .. } = a {
+ if let events::Event::PendingHTLCsForwardable { .. } = b {
+ Ordering::Less
+ } else { Ordering::Equal }
+ } else if let events::Event::PendingHTLCsForwardable { .. } = a {
+ if let events::Event::PaymentReceived { .. } = b {
+ Ordering::Greater
+ } else { Ordering::Equal }
+ } else { Ordering::Equal }
+ });
+ for event in events.drain(..) {
+ match event {
+ events::Event::PaymentReceived { payment_hash, .. } => {
+ if claim_set.insert(payment_hash.0) {
+ if $fail {
+ assert!(nodes[$node].fail_htlc_backwards(&payment_hash));
+ } else {
+ assert!(nodes[$node].claim_funds(PaymentPreimage(payment_hash.0)));
+ }
+ }
+ },
+ events::Event::PaymentSent { .. } => {},
+ events::Event::PaymentFailed { .. } => {},
+ events::Event::PendingHTLCsForwardable { .. } => {
+ nodes[$node].process_pending_htlc_forwards();
+ },
+ _ => panic!("Unhandled event"),
+ }
+ }
+ } }
+ }
+
+ match get_slice!(1)[0] {
+ 0x00 => *monitor_a.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+ 0x01 => *monitor_b.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+ 0x02 => *monitor_c.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure),
+ 0x03 => *monitor_a.update_ret.lock().unwrap() = Ok(()),
+ 0x04 => *monitor_b.update_ret.lock().unwrap() = Ok(()),
+ 0x05 => *monitor_c.update_ret.lock().unwrap() = Ok(()),
+ 0x06 => { unsafe { IN_RESTORE = true }; nodes[0].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
+ 0x07 => { unsafe { IN_RESTORE = true }; nodes[1].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
+ 0x08 => { unsafe { IN_RESTORE = true }; nodes[2].test_restore_channel_monitor(); unsafe { IN_RESTORE = false }; },
+ 0x09 => send_payment!(nodes[0], (&nodes[1], chan_a)),
+ 0x0a => send_payment!(nodes[1], (&nodes[0], chan_a)),
+ 0x0b => send_payment!(nodes[1], (&nodes[2], chan_b)),
+ 0x0c => send_payment!(nodes[2], (&nodes[1], chan_b)),
+ 0x0d => send_payment!(nodes[0], (&nodes[1], chan_a), (&nodes[2], chan_b)),
+ 0x0e => send_payment!(nodes[2], (&nodes[1], chan_b), (&nodes[0], chan_a)),
+ 0x0f => {
+ if !chan_a_disconnected {
+ nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
+ nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
+ chan_a_disconnected = true;
+ drain_msg_events_on_disconnect!(0);
+ }
+ },
+ 0x10 => {
+ if !chan_b_disconnected {
+ nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
+ nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
+ chan_b_disconnected = true;
+ drain_msg_events_on_disconnect!(2);
+ }
+ },
+ 0x11 => {
+ if chan_a_disconnected {
+ nodes[0].peer_connected(&nodes[1].get_our_node_id());
+ nodes[1].peer_connected(&nodes[0].get_our_node_id());
+ chan_a_disconnected = false;
+ }
+ },
+ 0x12 => {
+ if chan_b_disconnected {
+ nodes[1].peer_connected(&nodes[2].get_our_node_id());
+ nodes[2].peer_connected(&nodes[1].get_our_node_id());
+ chan_b_disconnected = false;
+ }
+ },
+ 0x13 => process_msg_events!(0, true),
+ 0x14 => process_msg_events!(0, false),
+ 0x15 => process_events!(0, true),
+ 0x16 => process_events!(0, false),
+ 0x17 => process_msg_events!(1, true),
+ 0x18 => process_msg_events!(1, false),
+ 0x19 => process_events!(1, true),
+ 0x1a => process_events!(1, false),
+ 0x1b => process_msg_events!(2, true),
+ 0x1c => process_msg_events!(2, false),
+ 0x1d => process_events!(2, true),
+ 0x1e => process_events!(2, false),
+ 0x1f => {
+ if !chan_a_disconnected {
+ nodes[1].peer_disconnected(&nodes[0].get_our_node_id(), false);
+ chan_a_disconnected = true;
+ drain_msg_events_on_disconnect!(0);
+ }
+ let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a);
+ node_a = Arc::new(new_node_a);
+ nodes[0] = node_a.clone();
+ monitor_a = new_monitor_a;
+ },
+ 0x20 => {
+ if !chan_a_disconnected {
+ nodes[0].peer_disconnected(&nodes[1].get_our_node_id(), false);
+ chan_a_disconnected = true;
+ nodes[0].get_and_clear_pending_msg_events();
+ ba_events.clear();
+ }
+ if !chan_b_disconnected {
+ nodes[2].peer_disconnected(&nodes[1].get_our_node_id(), false);
+ chan_b_disconnected = true;
+ nodes[2].get_and_clear_pending_msg_events();
+ bc_events.clear();
+ }
+ let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b);
+ node_b = Arc::new(new_node_b);
+ nodes[1] = node_b.clone();
+ monitor_b = new_monitor_b;
+ },
+ 0x21 => {
+ if !chan_b_disconnected {
+ nodes[1].peer_disconnected(&nodes[2].get_our_node_id(), false);
+ chan_b_disconnected = true;
+ drain_msg_events_on_disconnect!(2);
+ }
+ let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c);
+ node_c = Arc::new(new_node_c);
+ nodes[2] = node_c.clone();
+ monitor_c = new_monitor_c;
+ },
+ _ => test_return!(),
+ }
+
+ if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
+ node_a_ser.0.clear();
+ nodes[0].write(&mut node_a_ser).unwrap();
+ monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
+ *monitor_a.latest_updates_good_at_last_ser.lock().unwrap() = monitor_a.latest_update_good.lock().unwrap().clone();
+ }
+ if monitor_b.should_update_manager.load(atomic::Ordering::Relaxed) {
+ node_b_ser.0.clear();
+ nodes[1].write(&mut node_b_ser).unwrap();
+ monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
+ *monitor_b.latest_updates_good_at_last_ser.lock().unwrap() = monitor_b.latest_update_good.lock().unwrap().clone();
+ }
+ if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
+ node_c_ser.0.clear();
+ nodes[2].write(&mut node_c_ser).unwrap();
+ monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
+ *monitor_c.latest_updates_good_at_last_ser.lock().unwrap() = monitor_c.latest_update_good.lock().unwrap().clone();
+ }
+ }
+}
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+ fuzz!(|data| {
+ do_test(data);
+ });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+ loop {
+ fuzz!(|data| {
+ do_test(data);
+ });
+ }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+ do_test(data);
+});
+
+extern crate hex;
+#[cfg(test)]
+mod tests {
+ #[test]
+ fn duplicate_crash() {
+ super::do_test(&::hex::decode("00").unwrap());
+ }
+}
--- /dev/null
+//! Test that no series of bytes received over the wire/connections created/payments sent can
+//! result in a crash. We do this by standing up a node and then reading bytes from input to denote
+//! actions such as creating new inbound/outbound connections, bytes to be read from a connection,
+//! or payments to send/ways to handle events generated.
+//! This test has been very useful, though due to its complexity good starting inputs are critical.
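+//!
+//! Roughly, the interpreter loop below maps the first byte of each command to an action: 0/1
+//! open a new outbound/inbound connection, 2 disconnects a peer, 3 feeds wire bytes to a
+//! peer, 4 sends a payment, 5 opens a channel, 6 cooperatively closes one, 7 processes
+//! pending HTLC forwards, 8/9 claim or fail received payments, 10 builds funding
+//! transactions, 11 broadcasts and confirms them, 12/13 connect/disconnect blocks, and 14
+//! force-closes a channel.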
+
+//Uncomment this for libfuzzer builds:
+//#![no_main]
+
+extern crate bitcoin;
+extern crate bitcoin_hashes;
+extern crate lightning;
+extern crate secp256k1;
+
+use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::transaction::{Transaction, TxOut};
+use bitcoin::blockdata::script::{Builder, Script};
+use bitcoin::blockdata::opcodes;
+use bitcoin::consensus::encode::deserialize;
+use bitcoin::network::constants::Network;
+use bitcoin::util::hash::BitcoinHash;
+
+use bitcoin_hashes::Hash as TraitImport;
+use bitcoin_hashes::HashEngine as TraitImportEngine;
+use bitcoin_hashes::sha256::Hash as Sha256;
+use bitcoin_hashes::hash160::Hash as Hash160;
+use bitcoin_hashes::sha256d::Hash as Sha256dHash;
+
+use lightning::chain::chaininterface::{BroadcasterInterface,ConfirmationTarget,ChainListener,FeeEstimator,ChainWatchInterfaceUtil};
+use lightning::chain::transaction::OutPoint;
+use lightning::chain::keysinterface::{ChannelKeys, KeysInterface};
+use lightning::ln::channelmonitor;
+use lightning::ln::channelmanager::{ChannelManager, PaymentHash, PaymentPreimage};
+use lightning::ln::peer_handler::{MessageHandler,PeerManager,SocketDescriptor};
+use lightning::ln::router::Router;
+use lightning::util::events::{EventsProvider,Event};
+use lightning::util::logger::Logger;
+use lightning::util::config::UserConfig;
+
+mod utils;
+
+use utils::test_logger;
+
+use secp256k1::key::{PublicKey,SecretKey};
+use secp256k1::Secp256k1;
+
+use std::cell::RefCell;
+use std::collections::{HashMap, hash_map};
+use std::cmp;
+use std::hash::Hash;
+use std::sync::Arc;
+use std::sync::atomic::{AtomicU64,AtomicUsize,Ordering};
+
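+// Big-endian helpers for pulling integers out of (and encoding them into) the fuzz input.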
+#[inline]
+pub fn slice_to_be16(v: &[u8]) -> u16 {
+ ((v[0] as u16) << 8*1) |
+ ((v[1] as u16) << 8*0)
+}
+
+#[inline]
+pub fn slice_to_be24(v: &[u8]) -> u32 {
+ ((v[0] as u32) << 8*2) |
+ ((v[1] as u32) << 8*1) |
+ ((v[2] as u32) << 8*0)
+}
+
+#[inline]
+pub fn slice_to_be32(v: &[u8]) -> u32 {
+ ((v[0] as u32) << 8*3) |
+ ((v[1] as u32) << 8*2) |
+ ((v[2] as u32) << 8*1) |
+ ((v[3] as u32) << 8*0)
+}
+
+#[inline]
+pub fn be64_to_array(u: u64) -> [u8; 8] {
+ let mut v = [0; 8];
+ v[0] = ((u >> 8*7) & 0xff) as u8;
+ v[1] = ((u >> 8*6) & 0xff) as u8;
+ v[2] = ((u >> 8*5) & 0xff) as u8;
+ v[3] = ((u >> 8*4) & 0xff) as u8;
+ v[4] = ((u >> 8*3) & 0xff) as u8;
+ v[5] = ((u >> 8*2) & 0xff) as u8;
+ v[6] = ((u >> 8*1) & 0xff) as u8;
+ v[7] = ((u >> 8*0) & 0xff) as u8;
+ v
+}
+
+struct InputData {
+ data: Vec<u8>,
+ read_pos: AtomicUsize,
+}
+impl InputData {
+ fn get_slice(&self, len: usize) -> Option<&[u8]> {
+ let old_pos = self.read_pos.fetch_add(len, Ordering::AcqRel);
+ if self.data.len() < old_pos + len {
+ return None;
+ }
+ Some(&self.data[old_pos..old_pos + len])
+ }
+}
+
+struct FuzzEstimator {
+ input: Arc<InputData>,
+}
+impl FeeEstimator for FuzzEstimator {
+ fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u64 {
+ //TODO: We should actually be testing feerates well above 64k, but the two-byte read caps us there...
+ match self.input.get_slice(2) {
+ Some(slice) => cmp::max(slice_to_be16(slice) as u64, 253),
+ None => 0
+ }
+ }
+}
+
+struct TestBroadcaster {}
+impl BroadcasterInterface for TestBroadcaster {
+ fn broadcast_transaction(&self, _tx: &Transaction) {}
+}
+
+#[derive(Clone)]
+struct Peer<'a> {
+ id: u8,
+ peers_connected: &'a RefCell<[bool; 256]>,
+}
+impl<'a> SocketDescriptor for Peer<'a> {
+ fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize {
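+ // Pretend the whole buffer was written; outbound bytes are never inspected, we only
+ // care that handling inbound data can't crash us or lose money.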
+ data.len()
+ }
+ fn disconnect_socket(&mut self) {
+ assert!(self.peers_connected.borrow()[self.id as usize]);
+ self.peers_connected.borrow_mut()[self.id as usize] = false;
+ }
+}
+impl<'a> PartialEq for Peer<'a> {
+ fn eq(&self, other: &Self) -> bool {
+ self.id == other.id
+ }
+}
+impl<'a> Eq for Peer<'a> {}
+impl<'a> Hash for Peer<'a> {
+ fn hash<H : std::hash::Hasher>(&self, h: &mut H) {
+ self.id.hash(h)
+ }
+}
+
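+// Wraps the node and enough chain state to connect/disconnect blocks. On drop (see below) it
+// disconnects every peer and force-closes all channels, exercising the unilateral-exit paths.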
+struct MoneyLossDetector<'a> {
+ manager: Arc<ChannelManager>,
+ monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint>>,
+ handler: PeerManager<Peer<'a>>,
+
+ peers: &'a RefCell<[bool; 256]>,
+ funding_txn: Vec<Transaction>,
+ txids_confirmed: HashMap<Sha256dHash, usize>,
+ header_hashes: Vec<Sha256dHash>,
+ height: usize,
+ max_height: usize,
+ blocks_connected: u32,
+}
+impl<'a> MoneyLossDetector<'a> {
+ pub fn new(peers: &'a RefCell<[bool; 256]>, manager: Arc<ChannelManager>, monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint>>, handler: PeerManager<Peer<'a>>) -> Self {
+ MoneyLossDetector {
+ manager,
+ monitor,
+ handler,
+
+ peers,
+ funding_txn: Vec::new(),
+ txids_confirmed: HashMap::new(),
+ header_hashes: vec![Default::default()],
+ height: 0,
+ max_height: 0,
+ blocks_connected: 0,
+ }
+ }
+
+ fn connect_block(&mut self, all_txn: &[Transaction]) {
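+ // Only confirm transactions we haven't already confirmed, remembering the height of
+ // each txid so disconnect_block can un-confirm them on reorg.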
+ let mut txn = Vec::with_capacity(all_txn.len());
+ let mut txn_idxs = Vec::with_capacity(all_txn.len());
+ for (idx, tx) in all_txn.iter().enumerate() {
+ let txid = tx.txid();
+ match self.txids_confirmed.entry(txid) {
+ hash_map::Entry::Vacant(e) => {
+ e.insert(self.height);
+ txn.push(tx);
+ txn_idxs.push(idx as u32 + 1);
+ },
+ _ => {},
+ }
+ }
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height], merkle_root: Default::default(), time: self.blocks_connected, bits: 42, nonce: 42 };
+ self.height += 1;
+ self.blocks_connected += 1;
+ self.manager.block_connected(&header, self.height as u32, &txn[..], &txn_idxs[..]);
+ (*self.monitor).block_connected(&header, self.height as u32, &txn[..], &txn_idxs[..]);
+ if self.header_hashes.len() > self.height {
+ self.header_hashes[self.height] = header.bitcoin_hash();
+ } else {
+ assert_eq!(self.header_hashes.len(), self.height);
+ self.header_hashes.push(header.bitcoin_hash());
+ }
+ self.max_height = cmp::max(self.height, self.max_height);
+ }
+
+ fn disconnect_block(&mut self) {
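+ // Allow reorgs, but never deeper than six blocks below the best height we've seen.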
+ if self.height > 0 && (self.max_height < 6 || self.height >= self.max_height - 6) {
+ self.height -= 1;
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: self.header_hashes[self.height], merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ self.manager.block_disconnected(&header, self.height as u32);
+ self.monitor.block_disconnected(&header, self.height as u32);
+ let removal_height = self.height;
+ self.txids_confirmed.retain(|_, height| {
+ removal_height != *height
+ });
+ }
+ }
+}
+
+impl<'a> Drop for MoneyLossDetector<'a> {
+ fn drop(&mut self) {
+ if !::std::thread::panicking() {
+ // Disconnect all peers
+ for (idx, peer) in self.peers.borrow().iter().enumerate() {
+ if *peer {
+ self.handler.disconnect_event(&Peer{id: idx as u8, peers_connected: &self.peers});
+ }
+ }
+
+ // Force all channels onto the chain (and time out claim txn)
+ self.manager.force_close_all_channels();
+ }
+ }
+}
+
+struct KeyProvider {
+ node_secret: SecretKey,
+ counter: AtomicU64,
+}
+impl KeysInterface for KeyProvider {
+ fn get_node_secret(&self) -> SecretKey {
+ self.node_secret.clone()
+ }
+
+ fn get_destination_script(&self) -> Script {
+ let secp_ctx = Secp256k1::signing_only();
+ let channel_monitor_claim_key = SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
+ let our_channel_monitor_claim_key_hash = <Hash160 as bitcoin_hashes::Hash>::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
+ Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_monitor_claim_key_hash[..]).into_script()
+ }
+
+ fn get_shutdown_pubkey(&self) -> PublicKey {
+ let secp_ctx = Secp256k1::signing_only();
+ PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]).unwrap())
+ }
+
+ fn get_channel_keys(&self, inbound: bool) -> ChannelKeys {
+ let ctr = self.counter.fetch_add(1, Ordering::Relaxed) as u8;
+ if inbound {
+ ChannelKeys {
+ funding_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, ctr]).unwrap(),
+ revocation_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, ctr]).unwrap(),
+ payment_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, ctr]).unwrap(),
+ delayed_payment_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, ctr]).unwrap(),
+ htlc_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, ctr]).unwrap(),
+ commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, ctr],
+ }
+ } else {
+ ChannelKeys {
+ funding_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, ctr]).unwrap(),
+ revocation_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, ctr]).unwrap(),
+ payment_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, ctr]).unwrap(),
+ delayed_payment_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, ctr]).unwrap(),
+ htlc_base_key: SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, ctr]).unwrap(),
+ commitment_seed: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, ctr],
+ }
+ }
+ }
+
+ fn get_session_key(&self) -> SecretKey {
+ let ctr = self.counter.fetch_add(1, Ordering::Relaxed) as u8;
+ SecretKey::from_slice(&[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, ctr]).unwrap()
+ }
+
+ fn get_channel_id(&self) -> [u8; 32] {
+ let ctr = self.counter.fetch_add(1, Ordering::Relaxed);
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ (ctr >> 8*7) as u8, (ctr >> 8*6) as u8, (ctr >> 8*5) as u8, (ctr >> 8*4) as u8, (ctr >> 8*3) as u8, (ctr >> 8*2) as u8, (ctr >> 8*1) as u8, 14, (ctr >> 8*0) as u8]
+ }
+}
+
+#[inline]
+pub fn do_test(data: &[u8], logger: &Arc<Logger>) {
+ let input = Arc::new(InputData {
+ data: data.to_vec(),
+ read_pos: AtomicUsize::new(0),
+ });
+ let fee_est = Arc::new(FuzzEstimator {
+ input: input.clone(),
+ });
+
+ macro_rules! get_slice {
+ ($len: expr) => {
+ match input.get_slice($len as usize) {
+ Some(slice) => slice,
+ None => return,
+ }
+ }
+ }
+
+ macro_rules! get_pubkey {
+ () => {
+ match PublicKey::from_slice(get_slice!(33)) {
+ Ok(key) => key,
+ Err(_) => return,
+ }
+ }
+ }
+
+ let our_network_key = match SecretKey::from_slice(get_slice!(32)) {
+ Ok(key) => key,
+ Err(_) => return,
+ };
+
+ let watch = Arc::new(ChainWatchInterfaceUtil::new(Network::Bitcoin, Arc::clone(&logger)));
+ let broadcast = Arc::new(TestBroadcaster{});
+ let monitor = channelmonitor::SimpleManyChannelMonitor::new(watch.clone(), broadcast.clone(), Arc::clone(&logger), fee_est.clone());
+
+ let keys_manager = Arc::new(KeyProvider { node_secret: our_network_key.clone(), counter: AtomicU64::new(0) });
+ let mut config = UserConfig::new();
+ config.channel_options.fee_proportional_millionths = slice_to_be32(get_slice!(4));
+ config.channel_options.announced_channel = get_slice!(1)[0] != 0;
+ config.peer_channel_config_limits.min_dust_limit_satoshis = 0;
+ let channelmanager = ChannelManager::new(Network::Bitcoin, fee_est.clone(), monitor.clone(), watch.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, 0).unwrap();
+ let router = Arc::new(Router::new(PublicKey::from_secret_key(&Secp256k1::signing_only(), &keys_manager.get_node_secret()), watch.clone(), Arc::clone(&logger)));
+
+ let peers = RefCell::new([false; 256]);
+ let mut loss_detector = MoneyLossDetector::new(&peers, channelmanager.clone(), monitor.clone(), PeerManager::new(MessageHandler {
+ chan_handler: channelmanager.clone(),
+ route_handler: router.clone(),
+ }, our_network_key, &[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 0], Arc::clone(&logger)));
+
+ let mut should_forward = false;
+ let mut payments_received: Vec<PaymentHash> = Vec::new();
+ let mut payments_sent = 0;
+ let mut pending_funding_generation: Vec<([u8; 32], u64, Script)> = Vec::new();
+ let mut pending_funding_signatures = HashMap::new();
+ let mut pending_funding_relay = Vec::new();
+
+ loop {
+ match get_slice!(1)[0] {
+ 0 => {
+ let mut new_id = 0;
+ for i in 1..256 {
+ if !peers.borrow()[i-1] {
+ new_id = i;
+ break;
+ }
+ }
+ if new_id == 0 { return; }
+ loss_detector.handler.new_outbound_connection(get_pubkey!(), Peer{id: (new_id - 1) as u8, peers_connected: &peers}).unwrap();
+ peers.borrow_mut()[new_id - 1] = true;
+ },
+ 1 => {
+ let mut new_id = 0;
+ for i in 1..256 {
+ if !peers.borrow()[i-1] {
+ new_id = i;
+ break;
+ }
+ }
+ if new_id == 0 { return; }
+ loss_detector.handler.new_inbound_connection(Peer{id: (new_id - 1) as u8, peers_connected: &peers}).unwrap();
+ peers.borrow_mut()[new_id - 1] = true;
+ },
+ 2 => {
+ let peer_id = get_slice!(1)[0];
+ if !peers.borrow()[peer_id as usize] { return; }
+ loss_detector.handler.disconnect_event(&Peer{id: peer_id, peers_connected: &peers});
+ peers.borrow_mut()[peer_id as usize] = false;
+ },
+ 3 => {
+ let peer_id = get_slice!(1)[0];
+ if !peers.borrow()[peer_id as usize] { return; }
+ match loss_detector.handler.read_event(&mut Peer{id: peer_id, peers_connected: &peers}, get_slice!(get_slice!(1)[0]).to_vec()) {
+ Ok(res) => assert!(!res),
+ Err(_) => { peers.borrow_mut()[peer_id as usize] = false; }
+ }
+ },
+ 4 => {
+ let value = slice_to_be24(get_slice!(3)) as u64;
+ let route = match router.get_route(&get_pubkey!(), None, &Vec::new(), value, 42) {
+ Ok(route) => route,
+ Err(_) => return,
+ };
+ let mut payment_hash = PaymentHash([0; 32]);
+ payment_hash.0[0..8].copy_from_slice(&be64_to_array(payments_sent));
+ let mut sha = Sha256::engine();
+ sha.input(&payment_hash.0[..]);
+ payment_hash.0 = Sha256::from_engine(sha).into_inner();
+ payments_sent += 1;
+ match channelmanager.send_payment(route, payment_hash) {
+ Ok(_) => {},
+ Err(_) => return,
+ }
+ },
+ 5 => {
+ let peer_id = get_slice!(1)[0];
+ if !peers.borrow()[peer_id as usize] { return; }
+ let their_key = get_pubkey!();
+ let chan_value = slice_to_be24(get_slice!(3)) as u64;
+ let push_msat_value = slice_to_be24(get_slice!(3)) as u64;
+ if channelmanager.create_channel(their_key, chan_value, push_msat_value, 0).is_err() { return; }
+ },
+ 6 => {
+ let mut channels = channelmanager.list_channels();
+ let channel_id = get_slice!(1)[0] as usize;
+ if channel_id >= channels.len() { return; }
+ channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) });
+ if channelmanager.close_channel(&channels[channel_id].channel_id).is_err() { return; }
+ },
+ 7 => {
+ if should_forward {
+ channelmanager.process_pending_htlc_forwards();
+ should_forward = false;
+ }
+ },
+ 8 => {
+ for payment in payments_received.drain(..) {
+ // SHA256 is defined as XOR of all input bytes placed in the first byte, and 0s
+ // for the remaining bytes. Thus, if not all remaining bytes are 0s we cannot
+ // fulfill this HTLC, but if they are, we can just take the first byte and
+ // place that anywhere in our preimage.
+ if &payment.0[1..] != &[0; 31] {
+ channelmanager.fail_htlc_backwards(&payment);
+ } else {
+ let mut payment_preimage = PaymentPreimage([0; 32]);
+ payment_preimage.0[0] = payment.0[0];
+ channelmanager.claim_funds(payment_preimage);
+ }
+ }
+ },
+ 9 => {
+ for payment in payments_received.drain(..) {
+ channelmanager.fail_htlc_backwards(&payment);
+ }
+ },
+ 10 => {
+ 'outer_loop: for funding_generation in pending_funding_generation.drain(..) {
+ let mut tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ value: funding_generation.1, script_pubkey: funding_generation.2,
+ }] };
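+ // Grind the transaction version until the txid neither was already confirmed nor
+ // collides with an existing channel id, giving up after 0xff attempts.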
+ let funding_output = 'search_loop: loop {
+ let funding_txid = tx.txid();
+ if let None = loss_detector.txids_confirmed.get(&funding_txid) {
+ let outpoint = OutPoint::new(funding_txid, 0);
+ for chan in channelmanager.list_channels() {
+ if chan.channel_id == outpoint.to_channel_id() {
+ tx.version += 1;
+ continue 'search_loop;
+ }
+ }
+ break outpoint;
+ }
+ tx.version += 1;
+ if tx.version > 0xff {
+ continue 'outer_loop;
+ }
+ };
+ channelmanager.funding_transaction_generated(&funding_generation.0, funding_output.clone());
+ pending_funding_signatures.insert(funding_output, tx);
+ }
+ },
+ 11 => {
+ if !pending_funding_relay.is_empty() {
+ loss_detector.connect_block(&pending_funding_relay[..]);
+ for _ in 2..100 {
+ loss_detector.connect_block(&[]);
+ }
+ }
+ for tx in pending_funding_relay.drain(..) {
+ loss_detector.funding_txn.push(tx);
+ }
+ },
+ 12 => {
+ let txlen = slice_to_be16(get_slice!(2));
+ if txlen == 0 {
+ loss_detector.connect_block(&[]);
+ } else {
+ let txres: Result<Transaction, _> = deserialize(get_slice!(txlen));
+ if let Ok(tx) = txres {
+ loss_detector.connect_block(&[tx]);
+ } else {
+ return;
+ }
+ }
+ },
+ 13 => {
+ loss_detector.disconnect_block();
+ },
+ 14 => {
+ let mut channels = channelmanager.list_channels();
+ let channel_id = get_slice!(1)[0] as usize;
+ if channel_id >= channels.len() { return; }
+ channels.sort_by(|a, b| { a.channel_id.cmp(&b.channel_id) });
+ channelmanager.force_close_channel(&channels[channel_id].channel_id);
+ },
+ _ => return,
+ }
+ loss_detector.handler.process_events();
+ for event in loss_detector.manager.get_and_clear_pending_events() {
+ match event {
+ Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, output_script, .. } => {
+ pending_funding_generation.push((temporary_channel_id, channel_value_satoshis, output_script));
+ },
+ Event::FundingBroadcastSafe { funding_txo, .. } => {
+ pending_funding_relay.push(pending_funding_signatures.remove(&funding_txo).unwrap());
+ },
+ Event::PaymentReceived { payment_hash, .. } => {
+ payments_received.push(payment_hash);
+ },
+ Event::PaymentSent {..} => {},
+ Event::PaymentFailed {..} => {},
+ Event::PendingHTLCsForwardable {..} => {
+ should_forward = true;
+ },
+ Event::SpendableOutputs {..} => {},
+ }
+ }
+ }
+}
+
+#[cfg(feature = "afl")]
+#[macro_use] extern crate afl;
+#[cfg(feature = "afl")]
+fn main() {
+ fuzz!(|data| {
+ let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new("".to_owned()));
+ do_test(data, &logger);
+ });
+}
+
+#[cfg(feature = "honggfuzz")]
+#[macro_use] extern crate honggfuzz;
+#[cfg(feature = "honggfuzz")]
+fn main() {
+ loop {
+ fuzz!(|data| {
+ let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new("".to_owned()));
+ do_test(data, &logger);
+ });
+ }
+}
+
+#[cfg(feature = "libfuzzer_fuzz")]
+#[macro_use] extern crate libfuzzer_sys;
+#[cfg(feature = "libfuzzer_fuzz")]
+fuzz_target!(|data: &[u8]| {
+ let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new("".to_owned()));
+ do_test(data, &logger);
+});
+
+extern crate hex;
+#[cfg(test)]
+mod tests {
+ use utils::test_logger;
+ use lightning::util::logger::{Logger, Record};
+ use std::collections::HashMap;
+ use std::sync::{Arc, Mutex};
+
+ #[test]
+ fn duplicate_crash() {
+ let logger: Arc<Logger> = Arc::new(test_logger::TestLogger::new("".to_owned()));
+ super::do_test(&::hex::decode("00").unwrap(), &logger);
+ }
+
+ struct TrackingLogger {
+ /// (module, message) -> count
+ pub lines: Mutex<HashMap<(String, String), usize>>,
+ }
+ impl Logger for TrackingLogger {
+ fn log(&self, record: &Record) {
+ *self.lines.lock().unwrap().entry((record.module_path.to_string(), format!("{}", record.args))).or_insert(0) += 1;
+ println!("{:<5} [{} : {}, {}] {}", record.level.to_string(), record.module_path, record.file, record.line, record.args);
+ }
+ }
+
+ #[test]
+ fn test_no_existing_test_breakage() {
+ // To avoid accidentally causing all existing fuzz test cases to be useless by making minor
+ // changes (such as requesting feerate info in a new place), we run a pretty full
+ // step-through with two peers and HTLC forwarding here. Obviously this is pretty finicky,
+ // so this should be updated pretty liberally, but at least we'll know when changes occur.
+ // If nothing else, this test serves as a pretty great initial full_stack_target seed.
+
+ // What each byte represents is broken down below, and then everything is concatenated into
+ // one large test at the end (you want %s/ -.*//g %s/\n\| \|\t\|\///g).
+
+ // Following BOLT 8, lightning messages on the wire are: 2-byte encrypted message length +
+ // 16-byte MAC of the encrypted message length + encrypted Lightning message + 16-byte MAC
+ // of the Lightning message
+ // I.e., 2nd inbound read, len 18: 0006 (encrypted message length) + 03000000000000000000000000000000 (MAC of the encrypted message length)
+ // Len 22: 0010 00000000 (encrypted lightning message) + 03000000000000000000000000000000 (MAC of the Lightning message)
+
+ // 0000000000000000000000000000000000000000000000000000000000000000 - our network key
+ // 00000000 - fee_proportional_millionths
+ // 01 - announced_channel set to true
+ //
+ // 00 - new outbound connection with id 0
+ // 030000000000000000000000000000000000000000000000000000000000000000 - peer's pubkey
+ // 030032 - inbound read from peer id 0 of len 50
+ // 00 030000000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - noise act two (0||pubkey||mac)
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 0006 03000000000000000000000000000000 - message header indicating message length 6
+ // 030016 - inbound read from peer id 0 of len 22
+ // 0010 00000000 03000000000000000000000000000000 - init message with no features (type 16) and mac
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 0141 03000000000000000000000000000000 - message header indicating message length 321
+ // 0300fe - inbound read from peer id 0 of len 254
+ // 0020 7500000000000000000000000000000000000000000000000000000000000000 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 000000000000c350 0000000000000000 0000000000000222 ffffffffffffffff 0000000000000222 0000000000000000 000000fd 0006 01e3 030000000000000000000000000000000000000000000000000000000000000001 030000000000000000000000000000000000000000000000000000000000000002 030000000000000000000000000000000000000000000000000000000000000003 030000000000000000000000000000000000000000000000000000000000000004 - beginning of open_channel message
+ // 030053 - inbound read from peer id 0 of len 83
+ // 030000000000000000000000000000000000000000000000000000000000000005 030000000000000000000000000000000000000000000000000000000000000000 01 03000000000000000000000000000000 - rest of open_channel and mac
+ //
+ // 00fd00fd00fd - Three feerate requests (all returning min feerate, which our open_channel also uses) (gonna be ingested by FuzzEstimator)
+ // - client should now respond with accept_channel (CHECK 1: type 33 to peer 03000000)
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 0084 03000000000000000000000000000000 - message header indicating message length 132
+ // 030094 - inbound read from peer id 0 of len 148
+ // 0022 ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679 3d00000000000000000000000000000000000000000000000000000000000000 0000 5c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 03000000000000000000000000000000 - funding_created and mac
+ // - client should now respond with funding_signed (CHECK 2: type 35 to peer 03000000)
+ //
+ // 0c005e - connect a block with one transaction of len 94
+ // 020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae0000000000000000000000000000000000000000000000000000000000000000000000 - the funding transaction
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // - by now client should have sent a funding_locked (CHECK 3: SendFundingLocked to 03000000 for chan 3d000000)
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 0043 03000000000000000000000000000000 - message header indicating message length 67
+ // 030053 - inbound read from peer id 0 of len 83
+ // 0024 3d00000000000000000000000000000000000000000000000000000000000000 030100000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - funding_locked and mac
+ //
+ // 01 - new inbound connection with id 1
+ // 030132 - inbound read from peer id 1 of len 50
+ // 0003000000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000 - inbound noise act 1
+ // 030142 - inbound read from peer id 1 of len 66
+ // 000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003000000000000000000000000000000 - inbound noise act 3
+ //
+ // 030112 - inbound read from peer id 1 of len 18
+ // 0006 01000000000000000000000000000000 - message header indicating message length 6
+ // 030116 - inbound read from peer id 1 of len 22
+ // 0010 00000000 01000000000000000000000000000000 - init message with no features (type 16)
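+ // (the 6 message bytes are the 2-byte type 16 plus zero-length global and local feature fields)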
+ //
+ // 05 01 030200000000000000000000000000000000000000000000000000000000000000 00c350 0003e8 - create outbound channel to peer 1 for 50k sat
+ // 00fd00fd00fd - Three feerate requests (all returning min feerate; consumed by the FuzzEstimator)
+ //
+ // 030112 - inbound read from peer id 1 of len 18
+ // 0110 01000000000000000000000000000000 - message header indicating message length 272
+ // 0301ff - inbound read from peer id 1 of len 255
+ // 0021 0000000000000000000000000000000000000000000000000000000000000e02 000000000000001a 00000000004c4b40 00000000000003e8 00000000000003e8 00000002 03f0 0005 030000000000000000000000000000000000000000000000000000000000000100 030000000000000000000000000000000000000000000000000000000000000200 030000000000000000000000000000000000000000000000000000000000000300 030000000000000000000000000000000000000000000000000000000000000400 030000000000000000000000000000000000000000000000000000000000000500 03000000000000000000000000000000 - beginning of accept_channel
+ // 030121 - inbound read from peer id 1 of len 33
+ // 0000000000000000000000000000000000 01000000000000000000000000000000 - rest of accept_channel and mac
+ //
+ // 0a - create the funding transaction (client should send funding_created now)
+ //
+ // 030112 - inbound read from peer id 1 of len 18
+ // 0062 01000000000000000000000000000000 - message header indicating message length 98
+ // 030172 - inbound read from peer id 1 of len 114
+ // 0023 3900000000000000000000000000000000000000000000000000000000000000 f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100 01000000000000000000000000000000 - funding_signed message and mac
+ //
+ // 0b - broadcast funding transaction
+ // - by now client should have sent a funding_locked (CHECK 4: SendFundingLocked to 03020000 for chan 39000000)
+ //
+ // 030112 - inbound read from peer id 1 of len 18
+ // 0043 01000000000000000000000000000000 - message header indicating message length 67
+ // 030153 - inbound read from peer id 1 of len 83
+ // 0024 3900000000000000000000000000000000000000000000000000000000000000 030100000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - funding_locked and mac
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 05ac 03000000000000000000000000000000 - message header indicating message length 1452
+ // 0300ff - inbound read from peer id 0 of len 255
+ // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000000 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 00000121 00 030000000000000000000000000000000000000000000000000000000000000555 0000000e000001000000000000000003e8000000010000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
+ // 0300ff - inbound read from peer id 0 of len 255
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ // 0300ff - inbound read from peer id 0 of len 255
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ // 0300ff - inbound read from peer id 0 of len 255
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ // 0300ff - inbound read from peer id 0 of len 255
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ // 0300c1 - inbound read from peer id 0 of len 193
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ef00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
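+ // (of the 1452 message bytes, 1366 are the onion routing packet, here filled mostly with 0xff)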
+ //
+ // 00fd - A feerate request (returning min feerate, which our open_channel also uses; consumed by the FuzzEstimator)
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 0064 03000000000000000000000000000000 - message header indicating message length 100
+ // 030074 - inbound read from peer id 0 of len 116
+ // 0084 3d00000000000000000000000000000000000000000000000000000000000000 4d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 0000 03000000000000000000000000000000 - commitment_signed and mac
+ // - client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6: types 133 and 132 to peer 03000000)
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 0063 03000000000000000000000000000000 - message header indicating message length 99
+ // 030073 - inbound read from peer id 0 of len 115
+ // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 030200000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
+ //
+ // 07 - process the now-pending HTLC forward
+ // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: UpdateHTLCs event for node 03020000 with 1 HTLCs for channel 39000000)
+ //
+ // - we respond with commitment_signed then revoke_and_ack (a weird, but valid, order)
+ // 030112 - inbound read from peer id 1 of len 18
+ // 0064 01000000000000000000000000000000 - message header indicating message length 100
+ // 030174 - inbound read from peer id 1 of len 116
+ // 0084 3900000000000000000000000000000000000000000000000000000000000000 f1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100 0000 01000000000000000000000000000000 - commitment_signed and mac
+ //
+ // 030112 - inbound read from peer id 1 of len 18
+ // 0063 01000000000000000000000000000000 - message header indicating message length 99
+ // 030173 - inbound read from peer id 1 of len 115
+ // 0085 3900000000000000000000000000000000000000000000000000000000000000 0000000000000000000000000000000000000000000000000000000000000000 030200000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac
+ //
+ // 030112 - inbound read from peer id 1 of len 18
+ // 004a 01000000000000000000000000000000 - message header indicating message length 74
+ // 03015a - inbound read from peer id 1 of len 90
+ // 0082 3900000000000000000000000000000000000000000000000000000000000000 0000000000000000 ff00888888888888888888888888888888888888888888888888888888888888 01000000000000000000000000000000 - update_fulfill_htlc and mac
+ // - client should immediately claim the pending HTLC from peer 0 (CHECK 8: UpdateHTLCs with 1 fulfill for node 03000000 with preimage ff00888888 for channel 3d000000)
+ //
+ // 030112 - inbound read from peer id 1 of len 18
+ // 0064 01000000000000000000000000000000 - message header indicating message length 100
+ // 030174 - inbound read from peer id 1 of len 116
+ // 0084 3900000000000000000000000000000000000000000000000000000000000000 fd000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100 0000 01000000000000000000000000000000 - commitment_signed and mac
+ //
+ // 030112 - inbound read from peer id 1 of len 18
+ // 0063 01000000000000000000000000000000 - message header indicating message length 99
+ // 030173 - inbound read from peer id 1 of len 115
+ // 0085 3900000000000000000000000000000000000000000000000000000000000000 0100000000000000000000000000000000000000000000000000000000000000 030300000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac
+ //
+ // - before responding to the commitment_signed generated above, send a new HTLC
+ // 030012 - inbound read from peer id 0 of len 18
+ // 05ac 03000000000000000000000000000000 - message header indicating message length 1452
+ // 0300ff - inbound read from peer id 0 of len 255
+ // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000001 0000000000003e80 ff00000000000000000000000000000000000000000000000000000000000000 00000121 00 030000000000000000000000000000000000000000000000000000000000000555 0000000e000001000000000000000003e8000000010000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
+ // 0300ff - inbound read from peer id 0 of len 255
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ // 0300ff - inbound read from peer id 0 of len 255
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ // 0300ff - inbound read from peer id 0 of len 255
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ // 0300ff - inbound read from peer id 0 of len 255
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ // 0300c1 - inbound read from peer id 0 of len 193
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ef00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
+ //
+ // 00fd - A feerate request (returning min feerate, which our open_channel also uses; consumed by the FuzzEstimator)
+ //
+ // - now respond to the update_fulfill_htlc+commitment_signed messages the client sent to peer 0
+ // 030012 - inbound read from peer id 0 of len 18
+ // 0063 03000000000000000000000000000000 - message header indicating message length 99
+ // 030073 - inbound read from peer id 0 of len 115
+ // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0100000000000000000000000000000000000000000000000000000000000000 030300000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
+ // - client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6 duplicates)
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 0064 03000000000000000000000000000000 - message header indicating message length 100
+ // 030074 - inbound read from peer id 0 of len 116
+ // 0084 3d00000000000000000000000000000000000000000000000000000000000000 be000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 0000 03000000000000000000000000000000 - commitment_signed and mac
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 0063 03000000000000000000000000000000 - message header indicating message length 99
+ // 030073 - inbound read from peer id 0 of len 115
+ // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0200000000000000000000000000000000000000000000000000000000000000 030400000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
+ //
+ // 07 - process the now-pending HTLC forward
+ // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
+ // - we respond with commitment_signed then revoke_and_ack (the same weird-but-valid order again), then update_fail_htlc
+ //
+ // 030112 - inbound read from peer id 1 of len 18
+ // 0064 01000000000000000000000000000000 - message header indicating message length 100
+ // 030174 - inbound read from peer id 1 of len 116
+ // 0084 3900000000000000000000000000000000000000000000000000000000000000 fc000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100 0000 01000000000000000000000000000000 - commitment_signed and mac
+ //
+ // 030112 - inbound read from peer id 1 of len 18
+ // 0063 01000000000000000000000000000000 - message header indicating message length 99
+ // 030173 - inbound read from peer id 1 of len 115
+ // 0085 3900000000000000000000000000000000000000000000000000000000000000 0200000000000000000000000000000000000000000000000000000000000000 030400000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac
+ //
+ // 030112 - inbound read from peer id 1 of len 18
+ // 002c 01000000000000000000000000000000 - message header indicating message length 44
+ // 03013c - inbound read from peer id 1 of len 60
+ // 0083 3900000000000000000000000000000000000000000000000000000000000000 0000000000000001 0000 01000000000000000000000000000000 - update_fail_htlc and mac
+ //
+ // 030112 - inbound read from peer id 1 of len 18
+ // 0064 01000000000000000000000000000000 - message header indicating message length 100
+ // 030174 - inbound read from peer id 1 of len 116
+ // 0084 3900000000000000000000000000000000000000000000000000000000000000 fb000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100 0000 01000000000000000000000000000000 - commitment_signed and mac
+ //
+ // 030112 - inbound read from peer id 1 of len 18
+ // 0063 01000000000000000000000000000000 - message header indicating message length 99
+ // 030173 - inbound read from peer id 1 of len 115
+ // 0085 3900000000000000000000000000000000000000000000000000000000000000 0300000000000000000000000000000000000000000000000000000000000000 030500000000000000000000000000000000000000000000000000000000000000 01000000000000000000000000000000 - revoke_and_ack and mac
+ //
+ // 07 - process the now-pending HTLC forward
+ // - client now sends id 0 update_fail_htlc and commitment_signed (CHECK 9)
+ // - now respond to the update_fail_htlc+commitment_signed messages the client sent to peer 0
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 0063 03000000000000000000000000000000 - message header indicating message length 99
+ // 030073 - inbound read from peer id 0 of len 115
+ // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0300000000000000000000000000000000000000000000000000000000000000 030500000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 0064 03000000000000000000000000000000 - message header indicating message length 100
+ // 030074 - inbound read from peer id 0 of len 116
+ // 0084 3d00000000000000000000000000000000000000000000000000000000000000 4f000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 0000 03000000000000000000000000000000 - commitment_signed and mac
+ // - client should now respond with revoke_and_ack (CHECK 5 duplicate)
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 05ac 03000000000000000000000000000000 - message header indicating message length 1452
+ // 0300ff - inbound read from peer id 0 of len 255
+ // 0080 3d00000000000000000000000000000000000000000000000000000000000000 0000000000000002 00000000000b0838 ff00000000000000000000000000000000000000000000000000000000000000 00000121 00 030000000000000000000000000000000000000000000000000000000000000555 0000000e0000010000000000000003e800000000010000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000 ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff - beginning of update_add_htlc from 0 to 1 via client
+ // 0300ff - inbound read from peer id 0 of len 255
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ // 0300ff - inbound read from peer id 0 of len 255
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ // 0300ff - inbound read from peer id 0 of len 255
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ // 0300ff - inbound read from peer id 0 of len 255
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+ // 0300c1 - inbound read from peer id 0 of len 193
+ // ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff ef00000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - end of update_add_htlc from 0 to 1 via client and mac
+ //
+ // 00fd - A feerate request (returning min feerate, which our open_channel also uses; consumed by the FuzzEstimator)
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 00a4 03000000000000000000000000000000 - message header indicating message length 164
+ // 0300b4 - inbound read from peer id 0 of len 180
+ // 0084 3d00000000000000000000000000000000000000000000000000000000000000 07000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 0001 c8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007f00000000000000 03000000000000000000000000000000 - commitment_signed and mac
+ // - client should now respond with revoke_and_ack and commitment_signed (CHECK 5/6 duplicates)
+ //
+ // 030012 - inbound read from peer id 0 of len 18
+ // 0063 03000000000000000000000000000000 - message header indicating message length 99
+ // 030073 - inbound read from peer id 0 of len 115
+ // 0085 3d00000000000000000000000000000000000000000000000000000000000000 0400000000000000000000000000000000000000000000000000000000000000 030600000000000000000000000000000000000000000000000000000000000000 03000000000000000000000000000000 - revoke_and_ack and mac
+ //
+ // 07 - process the now-pending HTLC forward
+ // - client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
+ //
+ // 0c007d - connect a block with one transaction of len 125
+ // 0200000001390000000000000000000000000000000000000000000000000000000000000000000000000000008002000100000000000022002090000000000000000000000000000000000000000000000000000000000000006cc10000000000001600145c0000000000000000000000000000000000000005000020 - the commitment transaction for channel 3900000000000000000000000000000000000000000000000000000000000000
+ // 00fd - A feerate request (returning min feerate, which our open_channel also uses; consumed by the FuzzEstimator)
+ // 00fd - A feerate request (returning min feerate, which our open_channel also uses; consumed by the FuzzEstimator)
+ // 0c005e - connect a block with one transaction of len 94
+ // 0200000001fd00000000000000000000000000000000000000000000000000000000000000000000000000000000014f00000000000000220020f600000000000000000000000000000000000000000000000000000000000000000000 - a transaction spending the above commitment transaction's output (revealing no preimage, so the HTLC resolves as timed out)
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ // 0c0000 - connect a block with no transactions
+ //
+ // 07 - process the now-pending HTLC forward
+ // - client now fails the HTLC backwards as it was unable to extract the payment preimage (CHECK 9 duplicate and CHECK 10)
+
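+ // The do_test() input below is simply the concatenation of all of the bytes annotated above.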
+ let logger = Arc::new(TrackingLogger { lines: Mutex::new(HashMap::new()) });
+ super::do_test(&::hex::decode("00000000000000000000000000000000000000000000000000000000000000000000000001000300000000000000000000000000000000000000000000000000000000000000000300320003000000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000030012000603000000000000000000000000000000030016001000000000030000000000000000000000000000000300120141030000000000000000000000000000000300fe00207500000000000000000000000000000000000000000000000000000000000000ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679000000000000c35000000000000000000000000000000222ffffffffffffffff00000000000002220000000000000000000000fd000601e3030000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000000000000000000000000000000000002030000000000000000000000000000000000000000000000000000000000000003030000000000000000000000000000000000000000000000000000000000000004030053030000000000000000000000000000000000000000000000000000000000000005030000000000000000000000000000000000000000000000000000000000000000010300000000000000000000000000000000fd00fd00fd0300120084030000000000000000000000000000000300940022ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb1819096793d0000000000000000000000000000000000000000000000000000000000000000005c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001030000000000000000000000000000000c005e020000000100000000000000000000000000000000000000000000000000000000000000000000000000ffffffff0150c3000000000000220020ae00000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c00000c000003001200430300000000000000000000000000000003005300243d000000000000000000000000000000000000000000000000000000000000000301000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000001030132000300000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003014200030200000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000300000000000000000000000000000003011200060100000000000000000000000000000003011600100000000001000000000000000000000000000000050103020000000000000000000000000000000000000000000000000000000000000000c3500003e800fd00fd00fd0301120110010000000000000000000000000000000301ff00210000000000000000000000000000000000000000000000000000000000000e02000000000000001a00000000004c4b4000000000000003e800000000000003e80000000203f00005030000000000000000000000000000000000000000000000000000000000000100030000000000000000000000000000000000000000000000000000000000000200030000000000000000000000000000000000000000000000000000000000000300030000000000000000000000000000000000000000000000000000000000000400030000000000000000000000000000000000000000000000000000000000000500030000000000000000000000000000000301210000000000000000000000000000000000010000000000000000000000000000000a03011200620100000000000000000000000000000003017200233900000000000000000000000000000000000000000000000000000000000000f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100010000000000000000000000000000000b030112004301000000000000000000000000000000030153002439000000000000000000000000000000000000000000000000000000000000000301000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d00000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000003e80ff0000000000000000000000000000000000000000000000000000000000000000000121000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000010000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffef000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000fd03001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000004d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000302000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000007030112006401000000000000000000000000000
00003017400843900000000000000000000000000000000000000000000000000000000000000f100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003020000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112004a0100000000000000000000000000000003015a008239000000000000000000000000000000000000000000000000000000000000000000000000000000ff008888888888888888888888888888888888888888888888888888888888880100000000000000000000000000000003011200640100000000000000000000000000000003017400843900000000000000000000000000000000000000000000000000000000000000fd0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000010000000000000000000000000000000301120063010000000000000000000000000000000301730085390000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000303000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d0000000000000000000000000000000000000000000000000000000000000000000000000000010000000000003e80ff0000000000000000000000000000000000000000000000000000000000000000000121000300000000000000000000000000000000000000000000000000000000000005550000000e000001000000000000000003e8000000010000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffef000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000fd03001200630300000000000000000000000000000003007300853d0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000303000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d00000000000000000000000000000000000000000000000000000000000000be00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000030400000000000000000000000000000000000000000000000000000000000000030000000000000000000000000000000703011200640100000000000000000000000000000003017400843900000000000000000000000000000000000000000000000000000000000000fc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000100000000000000000000000000000003011200630100000000000000000000000000000003017300853900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003040000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000030112002c0100000000000000000000000000000003013c00833900000000000000000000000000000000000000000000000000000000000000000000000000000100000100000000000000000000000000000003011200640100000000000000000000000000000003017400843900000000000000000000000000000000000000000000000000000000000000fb000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000001000000000000000000000000000000030112006301000000000000000000000000000000030173008539000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000000000000000000000000000000000030500000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000703001200630300000000000000000000000000000003007300853d0000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000305000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000003001200640300000000000000000000000000000003007400843d000000000000000000000000000000000000000000000000000000000000004f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000300000000000000000000000000000003001205ac030000000000000000000000000000000300ff00803d000000000000000000000000000000000000000000000000000
00000000000000000000000000200000000000b0838ff0000000000000000000000000000000000000000000000000000000000000000000121000300000000000000000000000000000000000000000000000000000000000005550000000e0000010000000000000003e800000000010000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0300c1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffef000000000000000000000000000000000000000000000000000000000000000300000000000000000000000000000000fd03001200a4030000000000000000000000000000000300b400843d00000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010001c8000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007f000000000000000300000000000000000000000000000003001200630300000000000000000000000000000003007300853d000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000030600000000000000000000000000000000000000000000000000
00000000000003000000000000000000000000000000070c007d0200000001390000000000000000000000000000000000000000000000000000000000000000000000000000008002000100000000000022002090000000000000000000000000000000000000000000000000000000000000006cc10000000000001600145c000000000000000000000000000000000000000500002000fd00fd0c005e0200000001fd00000000000000000000000000000000000000000000000000000000000000000000000000000000014f00000000000000220020f600000000000000000000000000000000000000000000000000000000000000000000000c00000c00000c00000c00000c000007").unwrap(), &(Arc::clone(&logger) as Arc<Logger>));
+
+ let log_entries = logger.lines.lock().unwrap();
+ assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendAcceptChannel event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 for channel ff4f00f805273c1b203bb5ebf8436bfde57b3be8c2f5e95d9491dbb181909679".to_string())), Some(&1)); // 1
+ assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingSigned event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 2
+ assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingLocked event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 3
+ assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendFundingLocked event in peer_handler for node 030200000000000000000000000000000000000000000000000000000000000000 for channel 3900000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 4
+ assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling SendRevokeAndACK event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&4)); // 5
+ assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 with 0 adds, 0 fulfills, 0 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); // 6
+ assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030200000000000000000000000000000000000000000000000000000000000000 with 1 adds, 0 fulfills, 0 fails for channel 3900000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&3)); // 7
+ assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 with 0 adds, 1 fulfills, 0 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&1)); // 8
+ assert_eq!(log_entries.get(&("lightning::ln::peer_handler".to_string(), "Handling UpdateHTLCs event in peer_handler for node 030000000000000000000000000000000000000000000000000000000000000000 with 0 adds, 0 fulfills, 1 fails for channel 3d00000000000000000000000000000000000000000000000000000000000000".to_string())), Some(&2)); // 9
+ assert_eq!(log_entries.get(&("lightning::ln::channelmonitor".to_string(), "Input spending remote commitment tx (00000000000000000000000000000000000000000000000000000000000000fd:0) in 0000000000000000000000000000000000000000000000000000000000000044 resolves outbound HTLC with payment hash ff00000000000000000000000000000000000000000000000000000000000000 with timeout".to_string())), Some(&1)); // 10
+ }
+}
--- /dev/null
- if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::IgnoreError) }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
+//! Functional tests which test the correct handling of ChannelMonitorUpdateErr returns from
+//! monitor updates.
+//! There are a bunch of these, as their handling is relatively error-prone, so they are split out
+//! here. See also the chanmon_fail_consistency fuzz test.
+
+use ln::channelmanager::{RAACommitmentOrder, PaymentPreimage, PaymentHash};
+use ln::channelmonitor::ChannelMonitorUpdateErr;
+use ln::msgs;
+use ln::msgs::{ChannelMessageHandler, LocalFeatures, RoutingMessageHandler};
+use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::errors::APIError;
+
+use bitcoin_hashes::sha256::Hash as Sha256;
+use bitcoin_hashes::Hash;
+
+use ln::functional_test_utils::*;
+
+#[test]
+fn test_simple_monitor_permanent_update_fail() {
+ // Test that we handle a simple permanent monitor update failure
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
+ if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route, payment_hash_1) {} else { panic!(); }
+ check_added_monitors!(nodes[0], 1);
+
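+ // The permanent failure should have force-closed the channel: we expect a BroadcastChannelUpdate announcing the closure plus an error message sent to the peer.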
+ let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_1.len(), 2);
+ match events_1[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ };
+ match events_1[1] {
+ MessageSendEvent::HandleError { node_id, .. } => assert_eq!(node_id, nodes[1].node.get_our_node_id()),
+ _ => panic!("Unexpected event"),
+ };
+
+ // TODO: Once we hit the chain with the failure transaction we should check that we get a
+ // PaymentFailed event
+
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+}
+
+fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
+ // Test that we can recover from a simple temporary monitor update failure, optionally with
+ // a disconnect in between
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_1) {} else { panic!(); }
+ check_added_monitors!(nodes[0], 1);
+
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+ if disconnect {
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
+
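+ // Now allow the monitor update to succeed and retry it, un-freezing the channel so the pending payment can go out.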
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[0].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_2.len(), 1);
+ let payment_event = SendEvent::from_event(events_2.pop().unwrap());
+ assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let events_3 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_3.len(), 1);
+ match events_3[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(payment_hash_1, *payment_hash);
+ assert_eq!(amt, 1000000);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+
+ // Now set it to failed again...
+ let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route, payment_hash_2) {} else { panic!(); }
+ check_added_monitors!(nodes[0], 1);
+
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+ if disconnect {
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
+
+ // ...and make sure we can force-close a TemporaryFailure channel with a PermanentFailure
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
+ nodes[0].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[0], 1);
+ check_closed_broadcast!(nodes[0]);
+
+ // TODO: Once we hit the chain with the failure transaction we should check that we get a
+ // PaymentFailed event
+
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+}
+
+#[test]
+fn test_simple_monitor_temporary_update_fail() {
+ do_test_simple_monitor_temporary_update_fail(false);
+ do_test_simple_monitor_temporary_update_fail(true);
+}
+
+fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
+ let disconnect_flags = 8 | 16;
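+ // (bits 8 and 16 of disconnect_count are behavior flags rather than disconnect requests; see the walkthrough below)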
+
+ // Test that we can recover from a temporary monitor update failure with some in-flight
+ // HTLCs going on at the same time, potentially with some disconnection thrown in.
+ // * First we route a payment, then get a temporary monitor update failure when trying to
+ // route a second payment. We then claim the first payment.
+ // * If disconnect_count is set, we will disconnect at this point (which is plausible, as a
+ // TemporaryFailure often indicates a network disconnect which resulted in a failure to update
+ // the ChannelMonitor on a watchtower).
+ // * If !(disconnect_count & 16) we deliver an update_fulfill_htlc/CS for the first payment
+ // immediately, otherwise we wait for the disconnect and deliver them during the
+ // channel_reestablish processing on reconnect (i.e. disconnect_count & 16 makes no sense if
+ // disconnect_count & !disconnect_flags is 0).
+ // * We then update the channel monitor, reconnecting if disconnect_count is set, and walk
+ // through message sending, potentially disconnecting/reconnecting multiple times based on
+ // disconnect_count, to get the update_fulfill_htlc through.
+ // * We then walk through more message exchanges to get the original update_add_htlc
+ // through, swapping message ordering based on disconnect_count & 8 and optionally
+ // disconnecting/reconnecting based on disconnect_count.
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ // Now try to send a second payment which will fail to send
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ if let Err(APIError::MonitorUpdateFailed) = nodes[0].node.send_payment(route.clone(), payment_hash_2) {} else { panic!(); }
+ check_added_monitors!(nodes[0], 1);
+
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ assert_eq!(nodes[0].node.list_channels().len(), 1);
+
+ // Claim the previous payment, which will result in an update_fulfill_htlc/CS from nodes[1],
+ // but nodes[0] won't respond since it is frozen.
+ assert!(nodes[1].node.claim_funds(payment_preimage_1));
+ check_added_monitors!(nodes[1], 1);
+ let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_2.len(), 1);
+ let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ assert!(update_add_htlcs.is_empty());
+ assert_eq!(update_fulfill_htlcs.len(), 1);
+ assert!(update_fail_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert!(update_fee.is_none());
+
+ if (disconnect_count & 16) == 0 {
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
+ let events_3 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_3.len(), 1);
+ match events_3[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(*payment_preimage, payment_preimage_1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+	if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed) {
+ assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
+ } else { panic!(); }
+ }
+
+ (update_fulfill_htlcs[0].clone(), commitment_signed.clone())
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ if disconnect_count & !disconnect_flags > 0 {
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ }
+
+ // Now fix monitor updating...
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[0].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[0], 1);
+
+ macro_rules! disconnect_reconnect_peers { () => { {
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+ assert_eq!(reestablish_1.len(), 1);
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+ let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+ assert_eq!(reestablish_2.len(), 1);
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+ let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+ let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+ assert!(as_resp.0.is_none());
+ assert!(bs_resp.0.is_none());
+
+ (reestablish_1, reestablish_2, as_resp, bs_resp)
+ } } }
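+	// handle_chan_reestablish_msgs! returns a four-tuple; judging by the assertions below,
+	// element 0 (always None here) is any funding_locked, 1 the revoke_and_ack, 2 the
+	// CommitmentUpdate, and 3 the RAACommitmentOrder.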
+
+ let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 {
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+ assert_eq!(reestablish_1.len(), 1);
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+ let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+ assert_eq!(reestablish_2.len(), 1);
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+ check_added_monitors!(nodes[1], 0);
+ let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+ assert!(as_resp.0.is_none());
+ assert!(bs_resp.0.is_none());
+
+ assert!(bs_resp.1.is_none());
+ if (disconnect_count & 16) == 0 {
+ assert!(bs_resp.2.is_none());
+
+ assert!(as_resp.1.is_some());
+ assert!(as_resp.2.is_some());
+ assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
+ } else {
+ assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty());
+ assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
+ assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
+ assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none());
+ assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]);
+ assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed);
+
+ assert!(as_resp.1.is_none());
+
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]).unwrap();
+ let events_3 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_3.len(), 1);
+ match events_3[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(*payment_preimage, payment_preimage_1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed).unwrap();
+ let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[0], 1);
+
+ as_resp.1 = Some(as_resp_raa);
+ bs_resp.2 = None;
+ }
+
+ if disconnect_count & !disconnect_flags > 1 {
+ let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!();
+
+ if (disconnect_count & 16) == 0 {
+ assert!(reestablish_1 == second_reestablish_1);
+ assert!(reestablish_2 == second_reestablish_2);
+ }
+ assert!(as_resp == second_as_resp);
+ assert!(bs_resp == second_bs_resp);
+ }
+
+ (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), as_resp.2.unwrap()), as_resp.1.unwrap())
+ } else {
+ let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_4.len(), 2);
+ (SendEvent::from_event(events_4.remove(0)), match events_4[0] {
+ MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),
+ })
+ };
+
+ assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[1], 1);
+
+ if disconnect_count & !disconnect_flags > 2 {
+ let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+ assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
+ assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
+
+ assert!(as_resp.2.is_none());
+ assert!(bs_resp.2.is_none());
+ }
+
+ let as_commitment_update;
+ let bs_second_commitment_update;
+
+ macro_rules! handle_bs_raa { () => {
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+ as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ assert!(as_commitment_update.update_add_htlcs.is_empty());
+ assert!(as_commitment_update.update_fulfill_htlcs.is_empty());
+ assert!(as_commitment_update.update_fail_htlcs.is_empty());
+ assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty());
+ assert!(as_commitment_update.update_fee.is_none());
+ check_added_monitors!(nodes[0], 1);
+ } }
+
+ macro_rules! handle_initial_raa { () => {
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &initial_revoke_and_ack).unwrap();
+ bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(bs_second_commitment_update.update_add_htlcs.is_empty());
+ assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty());
+ assert!(bs_second_commitment_update.update_fail_htlcs.is_empty());
+ assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty());
+ assert!(bs_second_commitment_update.update_fee.is_none());
+ check_added_monitors!(nodes[1], 1);
+ } }
+
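+	// Each side now owes the other an RAA; bit 8 of disconnect_count picks which is delivered
+	// first, which also determines which side a reconnect below expects to retransmit
+	// RevokeAndACKFirst.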
+ if (disconnect_count & 8) == 0 {
+ handle_bs_raa!();
+
+ if disconnect_count & !disconnect_flags > 3 {
+ let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+ assert!(as_resp.1.unwrap() == initial_revoke_and_ack);
+ assert!(bs_resp.1.is_none());
+
+ assert!(as_resp.2.unwrap() == as_commitment_update);
+ assert!(bs_resp.2.is_none());
+
+ assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
+ }
+
+ handle_initial_raa!();
+
+ if disconnect_count & !disconnect_flags > 4 {
+ let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+ assert!(as_resp.1.is_none());
+ assert!(bs_resp.1.is_none());
+
+ assert!(as_resp.2.unwrap() == as_commitment_update);
+ assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
+ }
+ } else {
+ handle_initial_raa!();
+
+ if disconnect_count & !disconnect_flags > 3 {
+ let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+ assert!(as_resp.1.is_none());
+ assert!(bs_resp.1.unwrap() == bs_revoke_and_ack);
+
+ assert!(as_resp.2.is_none());
+ assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
+
+ assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst);
+ }
+
+ handle_bs_raa!();
+
+ if disconnect_count & !disconnect_flags > 4 {
+ let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!();
+
+ assert!(as_resp.1.is_none());
+ assert!(bs_resp.1.is_none());
+
+ assert!(as_resp.2.unwrap() == as_commitment_update);
+ assert!(bs_resp.2.unwrap() == bs_second_commitment_update);
+ }
+ }
+
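+	// Both commitment updates are now in hand; deliver each side's commitment_signed and then
+	// trade the final RAAs, after which nodes[1] can receive the second payment.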
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed).unwrap();
+ let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed).unwrap();
+ let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[0], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let events_5 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_5.len(), 1);
+ match events_5[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(payment_hash_2, *payment_hash);
+ assert_eq!(amt, 1000000);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
+
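+// The three test wrappers below spread the interesting disconnect_count combinations out:
+// the low bits choose how many disconnect/reconnect cycles happen, while bits 8 and 16
+// select the message-ordering variants described in do_test_monitor_temporary_update_fail.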
+#[test]
+fn test_monitor_temporary_update_fail_a() {
+ do_test_monitor_temporary_update_fail(0);
+ do_test_monitor_temporary_update_fail(1);
+ do_test_monitor_temporary_update_fail(2);
+ do_test_monitor_temporary_update_fail(3);
+ do_test_monitor_temporary_update_fail(4);
+ do_test_monitor_temporary_update_fail(5);
+}
+
+#[test]
+fn test_monitor_temporary_update_fail_b() {
+ do_test_monitor_temporary_update_fail(2 | 8);
+ do_test_monitor_temporary_update_fail(3 | 8);
+ do_test_monitor_temporary_update_fail(4 | 8);
+ do_test_monitor_temporary_update_fail(5 | 8);
+}
+
+#[test]
+fn test_monitor_temporary_update_fail_c() {
+ do_test_monitor_temporary_update_fail(1 | 16);
+ do_test_monitor_temporary_update_fail(2 | 16);
+ do_test_monitor_temporary_update_fail(3 | 16);
+ do_test_monitor_temporary_update_fail(2 | 8 | 16);
+ do_test_monitor_temporary_update_fail(3 | 8 | 16);
+}
+
+#[test]
+fn test_monitor_update_fail_cs() {
+ // Tests handling of a monitor update failure when processing an incoming commitment_signed
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+ let responses = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(responses.len(), 2);
+
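+	// The responses must come RAA-first: nodes[1] took a commitment_signed while frozen, so on
+	// restore its revoke_and_ack precedes its own commitment_signed.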
+ match responses[0] {
+ MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &msg).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match responses[1] {
+ MessageSendEvent::UpdateHTLCs { ref updates, ref node_id } => {
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+			if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[0], 1);
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[0].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[0], 1);
+
+ let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &final_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentReceived { payment_hash, amt } => {
+ assert_eq!(payment_hash, our_payment_hash);
+ assert_eq!(amt, 1000000);
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+}
+
+#[test]
+fn test_monitor_update_fail_no_rebroadcast() {
+ // Tests handling of a monitor update failure when no message rebroadcasting on
+ // test_restore_channel_monitor() is required. Backported from
+ // chanmon_fail_consistency fuzz tests.
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+ let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true);
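+	// With these flags the dance hands back the final revoke_and_ack (nodes[0]'s) rather than
+	// delivering it, so we can deliver it by hand once the monitor update is set to fail.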
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+ check_added_monitors!(nodes[1], 1);
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentReceived { payment_hash, .. } => {
+ assert_eq!(payment_hash, our_payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+}
+
+#[test]
+fn test_monitor_update_raa_while_paused() {
+ // Tests handling of an RAA while monitor updating has already been marked failed.
+ // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ send_payment(&nodes[0], &[&nodes[1]], 5000000);
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, our_payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+
+ let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, our_payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[1].node.send_payment(route, our_payment_hash_2).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0));
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
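+	// Each side now has an update_add_htlc in flight towards the other, and nodes[1]
+	// additionally owes nodes[0] the RAA captured above; a fully crossed state.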
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]).unwrap();
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[0], 1);
+
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap_err() {
+ assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
+ } else { panic!(); }
+ check_added_monitors!(nodes[0], 1);
+
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[0].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_update_raa.0).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_raa.1).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ expect_pending_htlcs_forwardable!(nodes[0]);
+ expect_payment_received!(nodes[0], our_payment_hash_2, 1000000);
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], our_payment_hash_1, 1000000);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+ claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_2);
+}
+
+fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
+ // Tests handling of a monitor update failure when processing an incoming RAA
+ let mut nodes = create_network(3, &[None, None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance a bit so that we can send backwards from 2 to 1.
+ send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+ // Route a first payment that we'll fail backwards
+ let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+
+ // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
+ assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 1);
+
+ let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+
+ let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
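+	// With these flags the dance hands back nodes[2]'s final revoke_and_ack rather than
+	// delivering it; nodes[1] won't fail the HTLC back to nodes[0] until it has handled that
+	// RAA, which we will do below with monitor updating broken.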
+ check_added_monitors!(nodes[0], 0);
+
+ // While the second channel is AwaitingRAA, forward a second payment to get it into the
+ // holding cell.
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 0);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ // Now fail monitor updating.
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[1], 1);
+
+ // Attempt to forward a third payment but fail due to the second channel being unavailable
+ // for forwarding.
+
+ let (_, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
+ let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ nodes[0].node.send_payment(route, payment_hash_3).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(()); // We succeed in updating the monitor for the first channel
+ send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true);
+ check_added_monitors!(nodes[1], 0);
+
+ let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_2.len(), 1);
+ match events_2.remove(0) {
+ MessageSendEvent::UpdateHTLCs { node_id, updates } => {
+ assert_eq!(node_id, nodes[0].node.get_our_node_id());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
+
+ let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ match msg_events[0] {
+ MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
+ assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
+ assert_eq!(msg.contents.flags & 2, 2); // temp disabled
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+ assert_eq!(payment_hash, payment_hash_3);
+ assert!(!rejected_by_dest);
+ } else { panic!("Unexpected event!"); }
+ },
+ _ => panic!("Unexpected event type!"),
+ };
+
+ let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs {
+ // Try to route another payment backwards from 2 to make sure 1 holds off on responding
+ let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[0]);
+ let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ nodes[2].node.send_payment(route, payment_hash_4).unwrap();
+ check_added_monitors!(nodes[2], 1);
+
+ send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0));
+ nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+		if let Err(msgs::LightningError{err, action: msgs::ErrorAction::IgnoreError }) = nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg) {
+ assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
+ } else { panic!(); }
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+ (Some(payment_preimage_4), Some(payment_hash_4))
+ } else { (None, None) };
+
+	// Restore monitor updating, ensuring we immediately get a fail-back update and an
+	// update_add update.
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+
+ let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
+ if test_ignore_second_cs {
+ assert_eq!(events_3.len(), 3);
+ } else {
+ assert_eq!(events_3.len(), 2);
+ }
+
+	// Note that the ordering of events across different nodes is non-prescriptive, though the
+	// two events destined for nodes[2] must stay in order relative to each other.
+ let messages_a = match events_3.pop().unwrap() {
+ MessageSendEvent::UpdateHTLCs { node_id, mut updates } => {
+ assert_eq!(node_id, nodes[0].node.get_our_node_id());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ (updates.update_fail_htlcs.remove(0), updates.commitment_signed)
+ },
+ _ => panic!("Unexpected event type!"),
+ };
+ let raa = if test_ignore_second_cs {
+ match events_3.remove(1) {
+ MessageSendEvent::SendRevokeAndACK { node_id, msg } => {
+ assert_eq!(node_id, nodes[2].node.get_our_node_id());
+ Some(msg.clone())
+ },
+ _ => panic!("Unexpected event"),
+ }
+ } else { None };
+ let send_event_b = SendEvent::from_event(events_3.remove(0));
+ assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id());
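+	// messages_a carries the fail-back of the first payment to nodes[0], while send_event_b
+	// (and raa, when set) carry the second payment's update_add on to nodes[2].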
+
+ // Now deliver the new messages...
+
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &messages_a.0).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false);
+ let events_4 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_4.len(), 1);
+ if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events_4[0] {
+ assert_eq!(payment_hash, payment_hash_1);
+ assert!(rejected_by_dest);
+ } else { panic!("Unexpected event!"); }
+
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]).unwrap();
+ if test_ignore_second_cs {
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg).unwrap();
+ check_added_monitors!(nodes[2], 1);
+ let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa.unwrap()).unwrap();
+ check_added_monitors!(nodes[2], 1);
+ let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(bs_cs.update_add_htlcs.is_empty());
+ assert!(bs_cs.update_fail_htlcs.is_empty());
+ assert!(bs_cs.update_fail_malformed_htlcs.is_empty());
+ assert!(bs_cs.update_fulfill_htlcs.is_empty());
+ assert!(bs_cs.update_fee.is_none());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+ assert!(as_cs.update_add_htlcs.is_empty());
+ assert!(as_cs.update_fail_htlcs.is_empty());
+ assert!(as_cs.update_fail_malformed_htlcs.is_empty());
+ assert!(as_cs.update_fulfill_htlcs.is_empty());
+ assert!(as_cs.update_fee.is_none());
+
+ nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+ check_added_monitors!(nodes[2], 1);
+ let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[2], 1);
+ assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_second_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ } else {
+ commitment_signed_dance!(nodes[2], nodes[1], send_event_b.commitment_msg, false);
+ }
+
+ expect_pending_htlcs_forwardable!(nodes[2]);
+
+ let events_6 = nodes[2].node.get_and_clear_pending_events();
+ assert_eq!(events_6.len(), 1);
+ match events_6[0] {
+ Event::PaymentReceived { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); },
+ _ => panic!("Unexpected event"),
+ };
+
+ if test_ignore_second_cs {
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+
+ send_event = SendEvent::from_node(&nodes[1]);
+ assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id());
+ assert_eq!(send_event.msgs.len(), 1);
+ nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[0]);
+
+ let events_9 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_9.len(), 1);
+ match events_9[0] {
+ Event::PaymentReceived { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()),
+ _ => panic!("Unexpected event"),
+ };
+ claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap());
+ }
+
+ claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
+}
+
+#[test]
+fn test_monitor_update_fail_raa() {
+ do_test_monitor_update_fail_raa(false);
+ do_test_monitor_update_fail_raa(true);
+}
+
+#[test]
+fn test_monitor_update_fail_reestablish() {
+ // Simple test for message retransmission after monitor update failure on
+ // channel_reestablish generating a monitor update (which comes from freeing holding cell
+ // HTLCs).
+ let mut nodes = create_network(3, &[None, None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+ assert!(nodes[2].node.claim_funds(our_payment_preimage));
+ check_added_monitors!(nodes[2], 1);
+ let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
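+	// nodes[1] cannot forward the fulfill to nodes[0] while disconnected, so it sits in the
+	// holding cell; the channel_reestablish below frees it, generating the monitor update we
+	// are about to fail.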
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+ let as_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ let bs_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
+
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+ assert!(as_reestablish == get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()));
+ assert!(bs_reestablish == get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()));
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reestablish).unwrap();
+
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reestablish).unwrap();
+ check_added_monitors!(nodes[1], 0);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+
+ updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentSent { payment_preimage, .. } => assert_eq!(payment_preimage, our_payment_preimage),
+ _ => panic!("Unexpected event"),
+ }
+}
+
+#[test]
+fn raa_no_response_awaiting_raa_state() {
+ // This is a rather convoluted test which ensures that if handling of an RAA does not happen
+ // due to a previous monitor update failure, we still set AwaitingRemoteRevoke on the channel
+ // in question (assuming it intends to respond with a CS after monitor updating is restored).
+ // Backported from chanmon_fail_consistency fuzz tests as this used to be broken.
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ let (payment_preimage_3, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
+
+	// Queue up two payments - one will be delivered right away, one immediately goes into the
+	// holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA
+	// immediately after a CS. By failing the monitor update triggered by that CS (which
+	// requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the
+	// CS generation during RAA handling while in the monitor-update-failed state.
+ nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 0);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from
+ // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA,
+ // then restore channel monitor updates.
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
+ assert_eq!(err, "Previous monitor update failure prevented responses to RAA");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ // nodes[1] should be AwaitingRAA here!
+ check_added_monitors!(nodes[1], 1);
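+	// Restoring the monitor releases both the RAA owed for the delivered CS and, since
+	// nodes[1] is AwaitingRAA, the CS covering the holding-cell HTLC in a single batch.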
+ let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_1, 1000000);
+
+ // We send a third payment here, which is somewhat of a redundant test, but the
+ // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync
+ // commitment transaction states) whereas here we can explicitly check for it.
+ nodes[0].node.send_payment(route.clone(), payment_hash_3).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+ // Finally deliver the RAA to nodes[1] which results in a CS response to the last update
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_2, 1000000);
+ let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_3, 1000000);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
+}
+
+#[test]
+fn claim_while_disconnected_monitor_update_fail() {
+	// Test for claiming a payment while disconnected and then having the resulting
+	// channel-update-generated monitor update fail. This isn't a particularly contrived case
+	// for nodes suffering network instability.
+	// Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the
+	// handling code introduced a regression in this test (specifically, it caught a removal of
+	// the channel_reestablish handling ensuring the order was sensible given the messages
+	// used).
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // Forward a payment for B to claim
+ let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ assert!(nodes[1].node.claim_funds(payment_preimage_1));
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+ let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Now deliver A's reestablish, freeing the claim from the holding cell, but fail the
+	// monitor update.
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
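+	// nodes[1]'s fulfill and commitment update are now trapped behind the failed monitor
+	// update; nothing further goes out until test_restore_channel_monitor() below.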
+
+ // Send a second payment from A to B, resulting in a commitment update that gets swallowed with
+ // the monitor still failed
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]).unwrap();
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed).unwrap_err() {
+ assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
+ } else { panic!(); }
+	// Note that nodes[1] not updating the monitor here is OK - it won't take action on the new
+	// HTLC until we've test_restore_channel_monitor'd and updated for the new commitment
+	// transaction.
+
+ // Now un-fail the monitor, which will result in B sending its original commitment update,
+ // receiving the commitment update from A, and the resulting commitment dances.
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(bs_msgs.len(), 2);
+
+ match bs_msgs[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ match bs_msgs[1] {
+ MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), msg).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
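+	// Both sides now owe the other a fresh commitment_signed; deliver both, then trade the
+	// final RAAs to settle the channel.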
+ let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_2, 1000000);
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(*payment_preimage, payment_preimage_1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
+
+#[test]
+fn monitor_failed_no_reestablish_response() {
+ // Test for receiving a channel_reestablish after a monitor update failure resulted in no
+ // response to a commitment_signed.
+ // Backported from chanmon_fail_consistency fuzz tests as it caught a long-standing
+ // debug_assert!() failure in channel_reestablish handling.
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // Route the payment and deliver the initial commitment_signed (with a monitor update failure
+ // on receipt).
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+	if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+
+ // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
+ // is still failing to update monitors.
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+ let as_reconnect = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ let bs_reconnect = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect).unwrap();
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_reconnect).unwrap();
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+ let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_1, 1000000);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+}
+
+#[test]
+fn first_message_on_recv_ordering() {
+ // Test that if the initial generator of a monitor-update-frozen state doesn't generate
+ // messages, we're willing to flip the order of response messages if necessary in response to
+ // a commitment_signed which needs to send an RAA first.
+ // At a high level, our goal is to fail monitor updating in response to an RAA which needs no
+ // response and then handle a CS while in the failed state, requiring an RAA followed by a CS
+ // response. To do this, we start routing two payments, with the final RAA for the first being
+ // delivered while B is in AwaitingRAA, hence when we deliver the CS for the second B will
+ // have no pending response but will want to send an RAA/CS (with the updates for the second
+ // payment applied).
+ // Backported from chanmon_fail_consistency fuzz tests as it caught a bug here.
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // Route the first payment outbound, holding the last RAA for B until we are set up so that we
+ // can deliver it and fail the monitor update.
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ // Route the second payment, generating an update_add_htlc/commitment_signed
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+
+ // Deliver the final RAA for the first payment, which does not require a response. RAAs
+ // generally require a commitment_signed, so the fact that we're expecting an opposite response
+ // to the next message also tests resetting the delivery order.
- if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
++ if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap_err() {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ } else { panic!(); }
+ check_added_monitors!(nodes[1], 1);
+
+ // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
+ // RAA/CS response, which should be generated when we call test_restore_channel_monitor (with
+ // the appropriate HTLC acceptance).
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
- if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
++ if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap_err() {
+ assert_eq!(err, "Previous monitor update failure prevented generation of RAA");
+ } else { panic!(); }
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_1, 1000000);
+
+ let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_responses.0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_responses.1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_2, 1000000);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
+
+#[test]
+fn test_monitor_update_fail_claim() {
+ // Basic test for monitor update failures when processing claim_funds calls.
+ // We set up a simple 3-node network, sending a payment from A to B and failing B's monitor
+ // update to claim the payment. We then send a payment C->B->A, making the forward of this
+ // payment from B to A fail due to the paused channel. Finally, we restore the channel monitor
+ // updating and claim the payment on B.
+ let mut nodes = create_network(3, &[None, None, None]);
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance a bit so that we can send backwards from 3 to 2.
+ send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+ let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ assert!(nodes[1].node.claim_funds(payment_preimage_1));
+ check_added_monitors!(nodes[1], 1);
+
+ let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[2].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[2], 1);
+
+ // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be
+ // paused, so the forward shouldn't succeed until we call test_restore_channel_monitor().
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+
+ let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true);
+
+ let bs_fail_update = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+ nodes[2].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_update.update_fail_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[2], nodes[1], bs_fail_update.commitment_signed, false, true);
+
+ let msg_events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ match msg_events[0] {
+ MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
+ assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
+ assert_eq!(msg.contents.flags & 2, 2); // temp disabled
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let events = nodes[2].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+ assert_eq!(payment_hash, payment_hash_2);
+ assert!(!rejected_by_dest);
+ } else { panic!("Unexpected event!"); }
+
+ // Now restore monitor updating on the 0<->1 channel and claim the funds on B.
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ if let Event::PaymentSent { payment_preimage, .. } = events[0] {
+ assert_eq!(payment_preimage, payment_preimage_1);
+ } else { panic!("Unexpected event!"); }
+}
+
+#[test]
+fn test_monitor_update_on_pending_forwards() {
+ // Basic test for monitor update failures when processing pending HTLC fail/add forwards.
+ // We do this with a simple 3-node network, sending a payment from A to C and one from C to A.
+ // The payment from A to C will be failed by C and pending a back-fail to A, while the payment
+ // from C to A will be pending a forward to A.
+ let mut nodes = create_network(3, &[None, None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance a bit so that we can send backwards from 3 to 1.
+ send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000);
+
+ let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+ assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1));
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 1);
+
+ let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ let route = nodes[2].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[2].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[2], 1);
+
+ let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false);
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap();
+ nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ if let Event::PaymentFailed { payment_hash, rejected_by_dest, .. } = events[0] {
+ assert_eq!(payment_hash, payment_hash_1);
+ assert!(rejected_by_dest);
+ } else { panic!("Unexpected event!"); }
+ match events[1] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+ nodes[0].node.process_pending_htlc_forwards();
+ expect_payment_received!(nodes[0], payment_hash_2, 1000000);
+
+ claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_2);
+}
+
+#[test]
+fn monitor_update_claim_fail_no_response() {
+ // Test for claim_funds resulting in both a monitor update failure and no message response (due
+ // to the channel being AwaitingRAA).
+ // Backported from chanmon_fail_consistency fuzz tests as an unmerged version of the handling
+ // code was broken.
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // Forward a payment for B to claim
+ let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true);
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ assert!(nodes[1].node.claim_funds(payment_preimage_1));
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_2, 1000000);
+
+ let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(*payment_preimage, payment_preimage_1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
+
+// Note that restore_between_fails with !fail_on_generate is useless
+// Also note that !fail_on_generate && !fail_on_signed is useless
+// Finally, note that !fail_on_signed is not possible with fail_on_generate && !restore_between_fails
+// confirm_a_first and restore_b_before_conf are wholly unrelated to earlier bools and
+// restore_b_before_conf has no meaning if !confirm_a_first
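+// (For reference, the four do_during_funding_monitor_fail calls in during_funding_monitor_fail
+// below instantiate exactly the combinations of the first three flags that these notes leave
+// valid.)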
+fn do_during_funding_monitor_fail(fail_on_generate: bool, restore_between_fails: bool, fail_on_signed: bool, confirm_a_first: bool, restore_b_before_conf: bool) {
+ // Test that if the monitor update generated by funding_transaction_generated fails we continue
+ // the channel setup happily after the update is restored.
+ let mut nodes = create_network(2, &[None, None]);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43).unwrap();
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())).unwrap();
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())).unwrap();
+
+ let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], 100000, 43);
+
+ if fail_on_generate {
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ }
+ nodes[0].node.funding_transaction_generated(&temporary_channel_id, funding_output);
+ check_added_monitors!(nodes[0], 1);
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id())).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ if restore_between_fails {
+ assert!(fail_on_generate);
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[0].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[0], 1);
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ }
+
+ if fail_on_signed {
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::TemporaryFailure);
+ } else {
+ assert!(restore_between_fails || !fail_on_generate); // We can't switch to good now (there's no monitor update)
+ assert!(fail_on_generate); // Somebody has to fail
+ }
+ let funding_signed_res = nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
+ if fail_on_signed || !restore_between_fails {
- if let msgs::HandleError { err, action: Some(msgs::ErrorAction::IgnoreError) } = funding_signed_res.unwrap_err() {
++ if let msgs::LightningError { err, action: msgs::ErrorAction::IgnoreError } = funding_signed_res.unwrap_err() {
+ if fail_on_generate && !restore_between_fails {
+ assert_eq!(err, "Previous monitor update failure prevented funding_signed from allowing funding broadcast");
+ check_added_monitors!(nodes[0], 0);
+ } else {
+ assert_eq!(err, "Failed to update ChannelMonitor");
+ check_added_monitors!(nodes[0], 1);
+ }
+ } else { panic!(); }
+
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ *nodes[0].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[0].node.test_restore_channel_monitor();
+ } else {
+ funding_signed_res.unwrap();
+ }
+
+ check_added_monitors!(nodes[0], 1);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
+ assert_eq!(user_channel_id, 43);
+ assert_eq!(*funding_txo, funding_output);
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ if confirm_a_first {
+ confirm_transaction(&nodes[0].chain_monitor, &funding_tx, funding_tx.version);
+ nodes[1].node.handle_funding_locked(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendFundingLocked, nodes[1].node.get_our_node_id())).unwrap();
+ } else {
+ assert!(!restore_b_before_conf);
+ confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ }
+
+ // Make sure nodes[1] isn't stupid enough to re-send the FundingLocked on reconnect
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ if !restore_b_before_conf {
+ confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+ }
+
+ *nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
+ nodes[1].node.test_restore_channel_monitor();
+ check_added_monitors!(nodes[1], 1);
+
+ let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first {
+ nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingLocked, nodes[0].node.get_our_node_id())).unwrap();
+
+ confirm_transaction(&nodes[0].chain_monitor, &funding_tx, funding_tx.version);
+ let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]);
+ (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked))
+ } else {
+ if restore_b_before_conf {
+ confirm_transaction(&nodes[1].chain_monitor, &funding_tx, funding_tx.version);
+ }
+ let (funding_locked, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]);
+ (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &funding_locked))
+ };
+ for node in nodes.iter() {
+ assert!(node.router.handle_channel_announcement(&announcement).unwrap());
+ node.router.handle_channel_update(&as_update).unwrap();
+ node.router.handle_channel_update(&bs_update).unwrap();
+ }
+
+ send_payment(&nodes[0], &[&nodes[1]], 8000000);
+ close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
+}
+
+#[test]
+fn during_funding_monitor_fail() {
+ do_during_funding_monitor_fail(false, false, true, true, true);
+ do_during_funding_monitor_fail(true, false, true, false, false);
+ do_during_funding_monitor_fail(true, true, true, true, false);
+ do_during_funding_monitor_fail(true, true, false, false, false);
+}
--- /dev/null
+//! The top-level channel management and payment tracking stuff lives here.
+//!
+//! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
+//! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those
+//! upon reconnect to the relevant peer(s).
+//!
+//! It does not manage routing logic (see ln::router for that) nor does it manage constructing
+//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
+//! imply it needs to fail HTLCs/payments/channels it manages).
+
+use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::transaction::Transaction;
+use bitcoin::blockdata::constants::genesis_block;
+use bitcoin::network::constants::Network;
+use bitcoin::util::hash::BitcoinHash;
+
+use bitcoin_hashes::{Hash, HashEngine};
+use bitcoin_hashes::hmac::{Hmac, HmacEngine};
+use bitcoin_hashes::sha256::Hash as Sha256;
+use bitcoin_hashes::sha256d::Hash as Sha256dHash;
+use bitcoin_hashes::cmp::fixed_time_eq;
+
+use secp256k1::key::{SecretKey,PublicKey};
+use secp256k1::Secp256k1;
+use secp256k1::ecdh::SharedSecret;
+use secp256k1;
+
+use chain::chaininterface::{BroadcasterInterface,ChainListener,ChainWatchInterface,FeeEstimator};
+use chain::transaction::OutPoint;
+use ln::channel::{Channel, ChannelError};
+use ln::channelmonitor::{ChannelMonitor, ChannelMonitorUpdateErr, ManyChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
+use ln::router::Route;
+use ln::msgs;
+use ln::msgs::LocalFeatures;
+use ln::onion_utils;
- use ln::msgs::{ChannelMessageHandler, DecodeError, HandleError};
++use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError};
+use chain::keysinterface::KeysInterface;
+use util::config::UserConfig;
+use util::{byte_utils, events};
+use util::ser::{Readable, ReadableArgs, Writeable, Writer};
+use util::chacha20::ChaCha20;
+use util::logger::Logger;
+use util::errors::APIError;
+
+use std::{cmp, mem};
+use std::collections::{HashMap, hash_map, HashSet};
+use std::io::Cursor;
+use std::sync::{Arc, Mutex, MutexGuard, RwLock};
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::time::Duration;
+
+// We hold various information about HTLC relay in the HTLC objects in Channel itself:
+//
+// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
+// forward the HTLC with information it will give back to us when it does so, or if it should Fail
+// the HTLC with the relevant message for the Channel to handle giving to the remote peer.
+//
+// When a Channel forwards an HTLC to its peer, it will give us back the PendingForwardHTLCInfo
+// which we will use to construct an outbound HTLC, with a relevant HTLCSource::PreviousHopData
+// filled in to indicate where it came from (which we can use to either fail-backwards or fulfill
+// the HTLC backwards along the relevant path).
+// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
+// our payment, which we can use to decode errors or inform the user that the payment was sent.
+/// Stores the info we will need to send when we want to forward an HTLC onwards
+#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+pub(super) struct PendingForwardHTLCInfo {
+ onion_packet: Option<msgs::OnionPacket>,
+ incoming_shared_secret: [u8; 32],
+ payment_hash: PaymentHash,
+ short_channel_id: u64,
+ pub(super) amt_to_forward: u64,
+ pub(super) outgoing_cltv_value: u32,
+}
+
+#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+pub(super) enum HTLCFailureMsg {
+ Relay(msgs::UpdateFailHTLC),
+ Malformed(msgs::UpdateFailMalformedHTLC),
+}
+
+/// Stores whether we can't forward an HTLC or relevant forwarding info
+#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+pub(super) enum PendingHTLCStatus {
+ Forward(PendingForwardHTLCInfo),
+ Fail(HTLCFailureMsg),
+}
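+// Illustrative only (not part of this change): a Fail decision wraps the error message we will
+// relay back to the previous hop, eg
+//   PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+//       channel_id, htlc_id, reason: err_packet,
+//   }))
+// while a Forward decision carries the PendingForwardHTLCInfo defined above.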
+
+/// Identifies the previous hop of an inbound HTLC, so we can fail or claim it backwards
+#[derive(Clone, PartialEq)]
+pub(super) struct HTLCPreviousHopData {
+ short_channel_id: u64,
+ htlc_id: u64,
+ incoming_packet_shared_secret: [u8; 32],
+}
+
+/// Tracks the inbound corresponding to an outbound HTLC
+#[derive(Clone, PartialEq)]
+pub(super) enum HTLCSource {
+ PreviousHopData(HTLCPreviousHopData),
+ OutboundRoute {
+ route: Route,
+ session_priv: SecretKey,
+ /// Technically we can recalculate this from the route, but we cache it here to avoid
+ /// doing a double-pass on route when we get a failure back
+ first_hop_htlc_msat: u64,
+ },
+}
+#[cfg(test)]
+impl HTLCSource {
+ pub fn dummy() -> Self {
+ HTLCSource::OutboundRoute {
+ route: Route { hops: Vec::new() },
+ session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
+ first_hop_htlc_msat: 0,
+ }
+ }
+}
+
+#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
+pub(super) enum HTLCFailReason {
- ErrorPacket {
++ LightningError {
+ err: msgs::OnionErrorPacket,
+ },
+ Reason {
+ failure_code: u16,
+ data: Vec<u8>,
+ }
+}
+
+/// payment_hash type, used to cross-lock hops
+#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
+pub struct PaymentHash(pub [u8;32]);
+/// payment_preimage type, used to route payments between hops
+#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
+pub struct PaymentPreimage(pub [u8;32]);
+
+type ShutdownResult = (Vec<Transaction>, Vec<(HTLCSource, PaymentHash)>);
+
+/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
+/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
+/// immediately (ie with no further calls on it made). Thus, this step happens inside a
+/// channel_state lock. We then return the set of things that need to be done outside the lock in
+/// this struct and call handle_error!() on it.
+
+struct MsgHandleErrInternal {
- err: msgs::HandleError,
++ err: msgs::LightningError,
+ shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
+}
+impl MsgHandleErrInternal {
+ #[inline]
+ fn send_err_msg_no_close(err: &'static str, channel_id: [u8; 32]) -> Self {
+ Self {
- err: HandleError {
++ err: LightningError {
+ err,
- action: Some(msgs::ErrorAction::SendErrorMessage {
++ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage {
+ channel_id,
+ data: err.to_string()
+ },
- }),
++ },
+ },
+ shutdown_finish: None,
+ }
+ }
+ #[inline]
+ fn ignore_no_close(err: &'static str) -> Self {
+ Self {
- err: HandleError {
++ err: LightningError {
+ err,
- action: Some(msgs::ErrorAction::IgnoreError),
++ action: msgs::ErrorAction::IgnoreError,
+ },
+ shutdown_finish: None,
+ }
+ }
+ #[inline]
- fn from_no_close(err: msgs::HandleError) -> Self {
++ fn from_no_close(err: msgs::LightningError) -> Self {
+ Self { err, shutdown_finish: None }
+ }
+ #[inline]
+ fn from_finish_shutdown(err: &'static str, channel_id: [u8; 32], shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
+ Self {
- err: HandleError {
++ err: LightningError {
+ err,
- action: Some(msgs::ErrorAction::SendErrorMessage {
++ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage {
+ channel_id,
+ data: err.to_string()
+ },
- }),
++ },
+ },
+ shutdown_finish: Some((shutdown_res, channel_update)),
+ }
+ }
+ #[inline]
+ fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
+ Self {
+ err: match err {
- ChannelError::Ignore(msg) => HandleError {
++ ChannelError::Ignore(msg) => LightningError {
+ err: msg,
- action: Some(msgs::ErrorAction::IgnoreError),
++ action: msgs::ErrorAction::IgnoreError,
+ },
- ChannelError::Close(msg) => HandleError {
++ ChannelError::Close(msg) => LightningError {
+ err: msg,
- action: Some(msgs::ErrorAction::SendErrorMessage {
++ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage {
+ channel_id,
+ data: msg.to_string()
+ },
- }),
++ },
+ },
- ChannelError::CloseDelayBroadcast { msg, .. } => HandleError {
++ ChannelError::CloseDelayBroadcast { msg, .. } => LightningError {
+ err: msg,
- action: Some(msgs::ErrorAction::SendErrorMessage {
++ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage {
+ channel_id,
+ data: msg.to_string()
+ },
- }),
++ },
+ },
+ },
+ shutdown_finish: None,
+ }
+ }
+}
+
+/// We hold back HTLCs we intend to relay for a random interval greater than this (see
+/// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited).
+/// This provides some limited amount of privacy. Ideally this would range from somewhere like one
+/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
+const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
+
+pub(super) enum HTLCForwardInfo {
+ AddHTLC {
+ prev_short_channel_id: u64,
+ prev_htlc_id: u64,
+ forward_info: PendingForwardHTLCInfo,
+ },
+ FailHTLC {
+ htlc_id: u64,
+ err_packet: msgs::OnionErrorPacket,
+ },
+}
+
+/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
+/// be sent in the order they appear in the return value, however sometimes the order needs to be
+/// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
+/// they were originally sent). In those cases, this enum is also returned.
+#[derive(Clone, PartialEq)]
+pub(super) enum RAACommitmentOrder {
+ /// Send the CommitmentUpdate messages first
+ CommitmentFirst,
+ /// Send the RevokeAndACK message first
+ RevokeAndACKFirst,
+}
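+// Illustrative consumption of the ordering (sketch, assuming raa/commitment_update message
+// variables are in scope):
+//   match order {
+//       RAACommitmentOrder::CommitmentFirst => { /* queue commitment_update, then raa */ },
+//       RAACommitmentOrder::RevokeAndACKFirst => { /* queue raa, then commitment_update */ },
+//   }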
+
+// Note this is only exposed in cfg(test):
+pub(super) struct ChannelHolder {
+ pub(super) by_id: HashMap<[u8; 32], Channel>,
+ pub(super) short_to_id: HashMap<u64, [u8; 32]>,
+ /// short channel id -> forward infos. Key of 0 means payments received
+ /// Note that while this is held in the same mutex as the channels themselves, no consistency
+ /// guarantees are made about the existence of a channel with the short id here, nor the short
+ /// ids in the PendingForwardHTLCInfo!
+ pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
+ /// payment_hash -> Vec<(amount_received, htlc_source)> for tracking things that were to us and
+ /// can be failed/claimed by the user
+ /// Note that while this is held in the same mutex as the channels themselves, no consistency
+ /// guarantees are made about the channels given here actually existing anymore by the time you
+ /// go to read them!
+ pub(super) claimable_htlcs: HashMap<PaymentHash, Vec<(u64, HTLCPreviousHopData)>>,
+ /// Messages to send to peers - pushed to in the same lock that they are generated in (except
+ /// for broadcast messages, where ordering isn't as strict).
+ pub(super) pending_msg_events: Vec<events::MessageSendEvent>,
+}
+pub(super) struct MutChannelHolder<'a> {
+ pub(super) by_id: &'a mut HashMap<[u8; 32], Channel>,
+ pub(super) short_to_id: &'a mut HashMap<u64, [u8; 32]>,
+ pub(super) forward_htlcs: &'a mut HashMap<u64, Vec<HTLCForwardInfo>>,
+ pub(super) claimable_htlcs: &'a mut HashMap<PaymentHash, Vec<(u64, HTLCPreviousHopData)>>,
+ pub(super) pending_msg_events: &'a mut Vec<events::MessageSendEvent>,
+}
+impl ChannelHolder {
+ pub(super) fn borrow_parts(&mut self) -> MutChannelHolder {
+ MutChannelHolder {
+ by_id: &mut self.by_id,
+ short_to_id: &mut self.short_to_id,
+ forward_htlcs: &mut self.forward_htlcs,
+ claimable_htlcs: &mut self.claimable_htlcs,
+ pending_msg_events: &mut self.pending_msg_events,
+ }
+ }
+}
+
+#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
+const ERR: () = "You need at least 32 bit pointers (well, usize, but we'll assume they're the same) for ChannelManager::latest_block_height";
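+// (The line above is a poor man's static assert: when the cfg matches, assigning a &str to a
+// const of type () is a type error, so any build where usize is narrower than 32 bits fails,
+// keeping the AtomicUsize block-height fields below safe.)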
+
+/// Manager which keeps track of a number of channels and sends messages to the appropriate
+/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
+///
+/// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
+/// to individual Channels.
+///
+/// Implements Writeable to write out all channel state to disk. Implies peer_disconnected() for
+/// all peers during write/read (though does not modify this instance, only the instance being
+/// serialized). This will result in any channels which have not yet exchanged funding_created (ie
+/// called funding_transaction_generated for outbound channels) being closed.
+///
+/// Note that you can be a bit lazier about writing out ChannelManager than you can be with
+/// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
+/// returning from ManyChannelMonitor::add_update_monitor, with ChannelManagers, writing updates
+/// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
+/// the serialization process). If the deserialized version is out-of-date compared to the
+/// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
+/// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees).
+///
+/// Note that the deserializer is only implemented for (Sha256dHash, ChannelManager), which
+/// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
+/// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
+/// block_connected() to step towards your best block) upon deserialization before using the
+/// object!
+pub struct ChannelManager {
+ default_configuration: UserConfig,
+ genesis_hash: Sha256dHash,
+ fee_estimator: Arc<FeeEstimator>,
+ monitor: Arc<ManyChannelMonitor>,
+ chain_monitor: Arc<ChainWatchInterface>,
+ tx_broadcaster: Arc<BroadcasterInterface>,
+
+ #[cfg(test)]
+ pub(super) latest_block_height: AtomicUsize,
+ #[cfg(not(test))]
+ latest_block_height: AtomicUsize,
+ last_block_hash: Mutex<Sha256dHash>,
+ secp_ctx: Secp256k1<secp256k1::All>,
+
+ #[cfg(test)]
+ pub(super) channel_state: Mutex<ChannelHolder>,
+ #[cfg(not(test))]
+ channel_state: Mutex<ChannelHolder>,
+ our_network_key: SecretKey,
+
+ pending_events: Mutex<Vec<events::Event>>,
+ /// Used when we have to take a BIG lock to make sure everything is self-consistent.
+ /// Essentially just when we're serializing ourselves out.
+ /// Taken first everywhere where we are making changes before any other locks.
+ total_consistency_lock: RwLock<()>,
+
+ keys_manager: Arc<KeysInterface>,
+
+ logger: Arc<Logger>,
+}
+
+/// The amount of time we require our counterparty to wait to claim their money (ie the window
+/// within which we, or our watchtower, must check for them having broadcast a theft transaction).
+pub(crate) const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
+/// The amount of time we're willing to wait to claim money back to us
+pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 6 * 24 * 7;
+
+/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
+/// HTLC's CLTV. This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
+/// ie the node we forwarded the payment on to should always have enough room to reliably time out
+/// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
+/// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
+const CLTV_EXPIRY_DELTA: u16 = 6 * 12; //TODO?
+pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
+
+// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
+// ie that if the next-hop peer fails the HTLC within
+// LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to time it out onchain,
+// then wait ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC, then fail the corresponding
+// HTLC backward, all while remaining at least LATENCY_GRACE_PERIOD_BLOCKS ahead of the inbound
+// HTLC's own deadline.
+#[deny(const_err)]
+#[allow(dead_code)]
+const CHECK_CLTV_EXPIRY_SANITY: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
+
+// Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
+// ChannelMontior::would_broadcast_at_height for a description of why this is needed.
+#[deny(const_err)]
+#[allow(dead_code)]
+const CHECK_CLTV_EXPIRY_SANITY_2: u32 = CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
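+// (Both consts are compile-time assertions: all operands are unsigned, so if either inequality
+// were violated the subtraction would underflow, which #[deny(const_err)] turns into a build
+// failure. Illustrative values only, not the real constants:
+//   const OK: u32 = 10 - 3 - 3;  // compiles
+//   const BAD: u32 = 5 - 3 - 3;  // underflows -> rejected under deny(const_err)
+// )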
+
+macro_rules! secp_call {
+ ( $res: expr, $err: expr ) => {
+ match $res {
+ Ok(key) => key,
+ Err(_) => return Err($err),
+ }
+ };
+}
+
+/// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
+pub struct ChannelDetails {
+ /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
+ /// thereafter this is the txid of the funding transaction xor the funding transaction output).
+ /// Note that this means this value is *not* persistent - it can change once during the
+ /// lifetime of the channel.
+ pub channel_id: [u8; 32],
+ /// The position of the funding transaction in the chain. None if the funding transaction has
+ /// not yet been confirmed and the channel fully opened.
+ pub short_channel_id: Option<u64>,
+ /// The node_id of our counterparty
+ pub remote_network_id: PublicKey,
+ /// The value, in satoshis, of this channel as appears in the funding output
+ pub channel_value_satoshis: u64,
+ /// The user_id passed in to create_channel, or 0 if the channel was inbound.
+ pub user_id: u64,
+ /// The available outbound capacity for sending HTLCs to the remote peer. This does not include
+ /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
+ /// available for inclusion in new outbound HTLCs). This further does not include any pending
+ /// outgoing HTLCs which are awaiting some other resolution to be sent.
+ pub outbound_capacity_msat: u64,
+ /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
+ /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
+ /// available for inclusion in new inbound HTLCs).
+ /// Note that there are some corner cases not fully handled here, so the actual available
+ /// inbound capacity may be slightly higher than this.
+ pub inbound_capacity_msat: u64,
+ /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
+ /// the peer is connected, and (c) no monitor update failure is pending resolution.
+ pub is_live: bool,
+}
+
+macro_rules! handle_error {
+ ($self: ident, $internal: expr) => {
+ match $internal {
+ Ok(msg) => Ok(msg),
+ Err(MsgHandleErrInternal { err, shutdown_finish }) => {
+ if let Some((shutdown_res, update_option)) = shutdown_finish {
+ $self.finish_force_close_channel(shutdown_res);
+ if let Some(update) = update_option {
+ let mut channel_state = $self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ }
+ Err(err)
+ },
+ }
+ }
+}
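+// Typical call shape for the above (illustrative; the inner handler name is an assumption, not
+// from this change):
+//   handle_error!(self, self.internal_update_add_htlc(their_node_id, msg))
+// ie the Err path finishes any required force-close before surfacing the LightningError.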
+
+macro_rules! break_chan_entry {
+ ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
+ match $res {
+ Ok(res) => res,
+ Err(ChannelError::Ignore(msg)) => {
+ break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
+ },
+ Err(ChannelError::Close(msg)) => {
+ log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
+ let (channel_id, mut chan) = $entry.remove_entry();
+ if let Some(short_id) = chan.get_short_channel_id() {
+ $channel_state.short_to_id.remove(&short_id);
+ }
+ break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+ },
+ Err(ChannelError::CloseDelayBroadcast { .. }) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
+ }
+ }
+}
+
+macro_rules! try_chan_entry {
+ ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
+ match $res {
+ Ok(res) => res,
+ Err(ChannelError::Ignore(msg)) => {
+ return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
+ },
+ Err(ChannelError::Close(msg)) => {
+ log_trace!($self, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
+ let (channel_id, mut chan) = $entry.remove_entry();
+ if let Some(short_id) = chan.get_short_channel_id() {
+ $channel_state.short_to_id.remove(&short_id);
+ }
+ return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()))
+ },
+ Err(ChannelError::CloseDelayBroadcast { msg, update }) => {
+ log_error!($self, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
+ let (channel_id, mut chan) = $entry.remove_entry();
+ if let Some(short_id) = chan.get_short_channel_id() {
+ $channel_state.short_to_id.remove(&short_id);
+ }
+ if let Some(update) = update {
+ if let Err(e) = $self.monitor.add_update_monitor(update.get_funding_txo().unwrap(), update) {
+ match e {
+ // Upstream channel is dead, but we want at least to fail backward HTLCs to save
+ // downstream channels. In case of PermanentFailure, we are not going to be able
+ // to claim back the to_remote output on the remote commitment transaction. That doesn't
+ // make a difference here though: we are concerned with the HTLC circuit, not onchain funds.
+ ChannelMonitorUpdateErr::PermanentFailure => {},
+ ChannelMonitorUpdateErr::TemporaryFailure => {},
+ }
+ }
+ }
+ let mut shutdown_res = chan.force_shutdown();
+ if shutdown_res.0.len() >= 1 {
+ log_error!($self, "You have a toxic local commitment transaction {} avaible in channel monitor, read comment in ChannelMonitor::get_latest_local_commitment_txn to be informed of manual action to take", shutdown_res.0[0].txid());
+ }
+ shutdown_res.0.clear();
+ return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
+ }
+ }
+ }
+}
+
+macro_rules! handle_monitor_err {
+ ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+ handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
+ };
+ ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+ match $err {
+ ChannelMonitorUpdateErr::PermanentFailure => {
+ log_error!($self, "Closing channel {} due to monitor update PermanentFailure", log_bytes!($entry.key()[..]));
+ let (channel_id, mut chan) = $entry.remove_entry();
+ if let Some(short_id) = chan.get_short_channel_id() {
+ $channel_state.short_to_id.remove(&short_id);
+ }
+ // TODO: $failed_fails is dropped here, which will cause other channels to hit the
+ // chain in a confused state! We need to move them into the ChannelMonitor which
+ // will be responsible for failing backwards once things confirm on-chain.
+ // It's ok that we drop $failed_forwards here - at this point we'd rather they
+ // broadcast HTLC-Timeout and pay the associated fees to get their funds back than
+ // us bother trying to claim it just to forward on to another peer. If we're
+ // splitting hairs we'd prefer to claim payments that were to us, but we haven't
+ // given up the preimage yet, so might as well just wait until the payment is
+ // retried, avoiding the on-chain fees.
+ let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", channel_id, chan.force_shutdown(), $self.get_channel_update(&chan).ok()));
+ res
+ },
+ ChannelMonitorUpdateErr::TemporaryFailure => {
+ log_info!($self, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails",
+ log_bytes!($entry.key()[..]),
+ if $resend_commitment && $resend_raa {
+ match $action_type {
+ RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
+ RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" },
+ }
+ } else if $resend_commitment { "commitment" }
+ else if $resend_raa { "RAA" }
+ else { "nothing" },
+ (&$failed_forwards as &Vec<(PendingForwardHTLCInfo, u64)>).len(),
+ (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len());
+ if !$resend_commitment {
+ debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
+ }
+ if !$resend_raa {
+ debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
+ }
+ $entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+ Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor"), *$entry.key()))
+ },
+ }
+ }
+}
+
+macro_rules! return_monitor_err {
+ ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+ return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
+ };
+ ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+ return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+ }
+}
+
+// Does not break in case of TemporaryFailure!
+macro_rules! maybe_break_monitor_err {
+ ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
+ match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
+ (e, ChannelMonitorUpdateErr::PermanentFailure) => {
+ break e;
+ },
+ (_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
+ }
+ }
+}
+
+impl ChannelManager {
+ /// Constructs a new ChannelManager to hold several channels and route between them.
+ ///
+ /// This is the main "logic hub" for all channel-related actions, and implements
+ /// ChannelMessageHandler.
+ ///
+ /// Non-proportional fees are fixed according to our risk using the provided fee estimator.
+ ///
+ /// Panics if channel_value_satoshis is >= `MAX_FUNDING_SATOSHIS`!
- pub fn new(network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>,keys_manager: Arc<KeysInterface>, config: UserConfig) -> Result<Arc<ChannelManager>, secp256k1::Error> {
++ ///
++ /// User must provide the current blockchain height from which to track onchain channel
++ /// funding outpoints and send payments with reliable timelocks.
++ pub fn new(network: Network, feeest: Arc<FeeEstimator>, monitor: Arc<ManyChannelMonitor>, chain_monitor: Arc<ChainWatchInterface>, tx_broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>,keys_manager: Arc<KeysInterface>, config: UserConfig, current_blockchain_height: usize) -> Result<Arc<ChannelManager>, secp256k1::Error> {
+ let secp_ctx = Secp256k1::new();
+
+ let res = Arc::new(ChannelManager {
+ default_configuration: config.clone(),
+ genesis_hash: genesis_block(network).header.bitcoin_hash(),
+ fee_estimator: feeest.clone(),
+ monitor: monitor.clone(),
+ chain_monitor,
+ tx_broadcaster,
+
- latest_block_height: AtomicUsize::new(0), //TODO: Get an init value
++ latest_block_height: AtomicUsize::new(current_blockchain_height),
+ last_block_hash: Mutex::new(Default::default()),
+ secp_ctx,
+
+ channel_state: Mutex::new(ChannelHolder{
+ by_id: HashMap::new(),
+ short_to_id: HashMap::new(),
+ forward_htlcs: HashMap::new(),
+ claimable_htlcs: HashMap::new(),
+ pending_msg_events: Vec::new(),
+ }),
+ our_network_key: keys_manager.get_node_secret(),
+
+ pending_events: Mutex::new(Vec::new()),
+ total_consistency_lock: RwLock::new(()),
+
+ keys_manager,
+
+ logger,
+ });
+ let weak_res = Arc::downgrade(&res);
+ res.chain_monitor.register_listener(weak_res);
+ Ok(res)
+ }
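+ // Illustrative construction (sketch; variable names are assumptions, not from this change):
+ // the caller now seeds the height instead of the old hard-coded 0:
+ //   let manager = ChannelManager::new(Network::Testnet, fee_estimator, monitor, chain_watcher,
+ //       tx_broadcaster, logger, keys_manager, config, best_block_height)?;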
+
+ /// Creates a new outbound channel to the given remote node and with the given value.
+ ///
+ /// user_id will be provided back as user_channel_id in FundingGenerationReady and
+ /// FundingBroadcastSafe events to allow tracking of which events correspond with which
+ /// create_channel call. Note that user_channel_id defaults to 0 for inbound channels, so you
+ /// may wish to avoid using 0 for user_id here.
+ ///
+ /// If successful, will generate a SendOpenChannel message event, so you should probably poll
+ /// PeerManager::process_events afterwards.
+ ///
+ /// Raises APIError::APIMisuseError when channel_value_satoshis > 2**24 or push_msat is
+ /// greater than channel_value_satoshis * 1k or channel_value_satoshis is < 1000.
+ pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64) -> Result<(), APIError> {
+ if channel_value_satoshis < 1000 {
+ return Err(APIError::APIMisuseError { err: "channel_value must be at least 1000 satoshis" });
+ }
+
+ let channel = Channel::new_outbound(&*self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, Arc::clone(&self.logger), &self.default_configuration)?;
+ let res = channel.get_open_channel(self.genesis_hash.clone(), &*self.fee_estimator);
+
+ let _ = self.total_consistency_lock.read().unwrap();
+ let mut channel_state = self.channel_state.lock().unwrap();
+ match channel_state.by_id.entry(channel.channel_id()) {
+ hash_map::Entry::Occupied(_) => {
+ if cfg!(feature = "fuzztarget") {
+ return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG" });
+ } else {
+ panic!("RNG is bad???");
+ }
+ },
+ hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
+ }
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+ node_id: their_network_key,
+ msg: res,
+ });
+ Ok(())
+ }
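+ // Hypothetical usage (values illustrative): open a 100k-satoshi channel pushing 10k msat,
+ // tagged with user_id 42 so the eventual FundingGenerationReady event can be matched back to
+ // this call:
+ //   manager.create_channel(their_node_id, 100_000, 10_000, 42)?;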
+
+ /// Gets the list of open channels, in random order. See ChannelDetail field documentation for
+ /// more information.
+ pub fn list_channels(&self) -> Vec<ChannelDetails> {
+ let channel_state = self.channel_state.lock().unwrap();
+ let mut res = Vec::with_capacity(channel_state.by_id.len());
+ for (channel_id, channel) in channel_state.by_id.iter() {
+ let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
+ res.push(ChannelDetails {
+ channel_id: (*channel_id).clone(),
+ short_channel_id: channel.get_short_channel_id(),
+ remote_network_id: channel.get_their_node_id(),
+ channel_value_satoshis: channel.get_value_satoshis(),
+ inbound_capacity_msat,
+ outbound_capacity_msat,
+ user_id: channel.get_user_id(),
+ is_live: channel.is_live(),
+ });
+ }
+ res
+ }
+
+ /// Gets the list of usable channels, in random order. Useful as an argument to
+ /// Router::get_route to ensure non-announced channels are used.
+ ///
+ /// These are guaranteed to have their is_live value set to true, see the documentation for
+ /// ChannelDetails::is_live for more info on exactly what the criteria are.
+ pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
+ let channel_state = self.channel_state.lock().unwrap();
+ let mut res = Vec::with_capacity(channel_state.by_id.len());
+ for (channel_id, channel) in channel_state.by_id.iter() {
+ // Note we use is_live here instead of usable which leads to somewhat confused
+ // internal/external nomenclature, but that's OK because that's probably what the user
+ // really wanted anyway.
+ if channel.is_live() {
+ let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
+ res.push(ChannelDetails {
+ channel_id: (*channel_id).clone(),
+ short_channel_id: channel.get_short_channel_id(),
+ remote_network_id: channel.get_their_node_id(),
+ channel_value_satoshis: channel.get_value_satoshis(),
+ inbound_capacity_msat,
+ outbound_capacity_msat,
+ user_id: channel.get_user_id(),
+ is_live: true,
+ });
+ }
+ }
+ res
+ }
+
+ /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
+ /// will be accepted on the given channel, and after additional timeout/the closing of all
+ /// pending HTLCs, the channel will be closed on chain.
+ ///
+ /// May generate a SendShutdown message event on success, which should be relayed.
+ pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+
+ let (mut failed_htlcs, chan_option) = {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+ match channel_state.by_id.entry(channel_id.clone()) {
+ hash_map::Entry::Occupied(mut chan_entry) => {
+ let (shutdown_msg, failed_htlcs) = chan_entry.get_mut().get_shutdown()?;
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: chan_entry.get().get_their_node_id(),
+ msg: shutdown_msg
+ });
+ if chan_entry.get().is_shutdown() {
+ if let Some(short_id) = chan_entry.get().get_short_channel_id() {
+ channel_state.short_to_id.remove(&short_id);
+ }
+ (failed_htlcs, Some(chan_entry.remove_entry().1))
+ } else { (failed_htlcs, None) }
+ },
+ hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel"})
+ }
+ };
+ for htlc_source in failed_htlcs.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ }
+ let chan_update = if let Some(chan) = chan_option {
+ if let Ok(update) = self.get_channel_update(&chan) {
+ Some(update)
+ } else { None }
+ } else { None };
+
+ if let Some(update) = chan_update {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+
+ Ok(())
+ }
+
+ #[inline]
+ fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
+ let (local_txn, mut failed_htlcs) = shutdown_res;
+ log_trace!(self, "Finishing force-closure of channel with {} transactions to broadcast and {} HTLCs to fail", local_txn.len(), failed_htlcs.len());
+ for htlc_source in failed_htlcs.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ }
+ for tx in local_txn {
+ self.tx_broadcaster.broadcast_transaction(&tx);
+ }
+ }
+
+ /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
+ /// the chain and rejecting new HTLCs on the given channel.
+ pub fn force_close_channel(&self, channel_id: &[u8; 32]) {
+ let _ = self.total_consistency_lock.read().unwrap();
+
+ let mut chan = {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+ if let Some(chan) = channel_state.by_id.remove(channel_id) {
+ if let Some(short_id) = chan.get_short_channel_id() {
+ channel_state.short_to_id.remove(&short_id);
+ }
+ chan
+ } else {
+ return;
+ }
+ };
+ log_trace!(self, "Force-closing channel {}", log_bytes!(channel_id[..]));
+ self.finish_force_close_channel(chan.force_shutdown());
+ if let Ok(update) = self.get_channel_update(&chan) {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ }
+
+ /// Force close all channels, immediately broadcasting the latest local commitment transaction
+ /// for each to the chain and rejecting new HTLCs on each.
+ pub fn force_close_all_channels(&self) {
+ for chan in self.list_channels() {
+ self.force_close_channel(&chan.channel_id);
+ }
+ }
+
+ const ZERO:[u8; 65] = [0; 65];
+ fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder>) {
+ macro_rules! return_malformed_err {
+ ($msg: expr, $err_code: expr) => {
+ {
+ log_info!(self, "Failed to accept/forward incoming HTLC: {}", $msg);
+ return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
+ failure_code: $err_code,
+ })), self.channel_state.lock().unwrap());
+ }
+ }
+ }
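+ // The failure codes used below follow BOLT 4: 0x8000 (BADONION), 0x4000 (PERM),
+ // 0x2000 (NODE) and 0x1000 (UPDATE) are flag bits OR'd into a failure-specific value,
+ // e.g. 0x8000 | 0x4000 | 6 is invalid_onion_key and 0x4000 | 10 is unknown_next_peer.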
+
+ if let Err(_) = msg.onion_routing_packet.public_key {
+ return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
+ }
+
+ let shared_secret = {
+ let mut arr = [0; 32];
+ arr.copy_from_slice(&SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key)[..]);
+ arr
+ };
+ let (rho, mu) = onion_utils::gen_rho_mu_from_shared_secret(&shared_secret);
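+ // Per BOLT 4, rho keys the ChaCha20 stream we use below to decrypt hop_data, while mu
+ // keys the HMAC which authenticates the onion; both are derived from the ECDH shared
+ // secret we just computed with the sender's ephemeral key.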
+
+ if msg.onion_routing_packet.version != 0 {
+ //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
+ //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
+ //the hash doesn't really serve any purpose - in the case of hashing all data, the
+ //receiving node would have to brute force to figure out which version was put in the
+ //packet by the node that sent us the message; in the case of hashing the hop_data, the
+ //node knows the HMAC matched, so they already know what is there...
+ return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
+ }
+
+ let mut hmac = HmacEngine::<Sha256>::new(&mu);
+ hmac.input(&msg.onion_routing_packet.hop_data);
+ hmac.input(&msg.payment_hash.0[..]);
+ if !fixed_time_eq(&Hmac::from_engine(hmac).into_inner(), &msg.onion_routing_packet.hmac) {
+ return_malformed_err!("HMAC Check failed", 0x8000 | 0x4000 | 5);
+ }
+
+ let mut channel_state = None;
+ macro_rules! return_err {
+ ($msg: expr, $err_code: expr, $data: expr) => {
+ {
+ log_info!(self, "Failed to accept/forward incoming HTLC: {}", $msg);
+ if channel_state.is_none() {
+ channel_state = Some(self.channel_state.lock().unwrap());
+ }
+ return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ reason: onion_utils::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
+ })), channel_state.unwrap());
+ }
+ }
+ }
+
+ let mut chacha = ChaCha20::new(&rho, &[0u8; 8]);
+ let next_hop_data = {
+ let mut decoded = [0; 65];
+ chacha.process(&msg.onion_routing_packet.hop_data[0..65], &mut decoded);
+ match msgs::OnionHopData::read(&mut Cursor::new(&decoded[..])) {
+ Err(err) => {
+ let error_code = match err {
+ msgs::DecodeError::UnknownVersion => 0x4000 | 1, // unknown realm byte
+ _ => 0x2000 | 2, // Should never happen
+ };
+ return_err!("Unable to decode our hop data", error_code, &[0;0]);
+ },
+ Ok(msg) => msg
+ }
+ };
+
+ let pending_forward_info = if next_hop_data.hmac == [0; 32] {
+ // OUR PAYMENT!
+ // final_expiry_too_soon
+ if (msg.cltv_expiry as u64) < self.latest_block_height.load(Ordering::Acquire) as u64 + (CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
+ return_err!("The final CLTV expiry is too soon to handle", 17, &[0;0]);
+ }
+ // final_incorrect_htlc_amount
+ if next_hop_data.data.amt_to_forward > msg.amount_msat {
+ return_err!("Upstream node sent less than we were supposed to receive in payment", 19, &byte_utils::be64_to_array(msg.amount_msat));
+ }
+ // final_incorrect_cltv_expiry
+ if next_hop_data.data.outgoing_cltv_value != msg.cltv_expiry {
+ return_err!("Upstream node set CLTV to the wrong value", 18, &byte_utils::be32_to_array(msg.cltv_expiry));
+ }
+
+ // Note that we could obviously respond immediately with an update_fulfill_htlc
+ // message, however that would leak that we are the recipient of this payment, so
+ // instead we stay symmetric with the forwarding case, only responding (after a
+ // delay) once they've sent us a commitment_signed!
+
+ PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
+ onion_packet: None,
+ payment_hash: msg.payment_hash.clone(),
+ short_channel_id: 0,
+ incoming_shared_secret: shared_secret,
+ amt_to_forward: next_hop_data.data.amt_to_forward,
+ outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
+ })
+ } else {
+ let mut new_packet_data = [0; 20*65];
+ chacha.process(&msg.onion_routing_packet.hop_data[65..], &mut new_packet_data[0..19*65]);
+ chacha.process(&ChannelManager::ZERO[..], &mut new_packet_data[19*65..]);
+
+ let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
+
+ let blinding_factor = {
+ let mut sha = Sha256::engine();
+ sha.input(&new_pubkey.serialize()[..]);
+ sha.input(&shared_secret);
+ Sha256::from_engine(sha).into_inner()
+ };
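+ // Per BOLT 4, the ephemeral public key for the next hop is the current ephemeral key
+ // multiplied by the blinding factor SHA256(ephemeral_pubkey || shared_secret).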
+
+ let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
+ Err(e)
+ } else { Ok(new_pubkey) };
+
+ let outgoing_packet = msgs::OnionPacket {
+ version: 0,
+ public_key,
+ hop_data: new_packet_data,
+ hmac: next_hop_data.hmac.clone(),
+ };
+
+ PendingHTLCStatus::Forward(PendingForwardHTLCInfo {
+ onion_packet: Some(outgoing_packet),
+ payment_hash: msg.payment_hash.clone(),
+ short_channel_id: next_hop_data.data.short_channel_id,
+ incoming_shared_secret: shared_secret,
+ amt_to_forward: next_hop_data.data.amt_to_forward,
+ outgoing_cltv_value: next_hop_data.data.outgoing_cltv_value,
+ })
+ };
+
+ channel_state = Some(self.channel_state.lock().unwrap());
+ if let &PendingHTLCStatus::Forward(PendingForwardHTLCInfo { ref onion_packet, ref short_channel_id, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
+ if onion_packet.is_some() { // If short_channel_id is 0 here, we'll reject the HTLC in the body below
+ let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
+ let forwarding_id = match id_option {
+ None => { // unknown_next_peer
+ return_err!("Don't have available channel for forwarding as requested.", 0x4000 | 10, &[0;0]);
+ },
+ Some(id) => id.clone(),
+ };
+ if let Some((err, code, chan_update)) = loop {
+ let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
+
+ // Note that we could technically not return an error yet here and just hope
+ // that the connection is reestablished or monitor updated by the time we get
+ // around to doing the actual forward, but better to fail early if we can and
+ // hopefully an attacker trying to path-trace payments cannot make this occur
+ // on a small/per-node/per-channel scale.
+ if !chan.is_live() { // channel_disabled
+ break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, Some(self.get_channel_update(chan).unwrap())));
+ }
+ if *amt_to_forward < chan.get_their_htlc_minimum_msat() { // amount_below_minimum
+ break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, Some(self.get_channel_update(chan).unwrap())));
+ }
+ let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64).and_then(|prop_fee| { (prop_fee / 1000000).checked_add(chan.get_our_fee_base_msat(&*self.fee_estimator) as u64) });
+ if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
+ break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, Some(self.get_channel_update(chan).unwrap())));
+ }
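+ // As a worked example, with fee_base_msat = 1000 and fee_proportional_millionths = 100,
+ // forwarding amt_to_forward = 1_000_000 msat costs 1000 + 1_000_000 * 100 / 1_000_000
+ // = 1100 msat, so msg.amount_msat must be at least 1_001_100 msat.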
+ if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + CLTV_EXPIRY_DELTA as u64 { // incorrect_cltv_expiry
+ break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, Some(self.get_channel_update(chan).unwrap())));
+ }
+ let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ // We want at least LATENCY_GRACE_PERIOD_BLOCKS to fail the HTLC back before we go on chain, which we do CLTV_CLAIM_BUFFER blocks before expiration
+ if msg.cltv_expiry <= cur_height + CLTV_CLAIM_BUFFER + LATENCY_GRACE_PERIOD_BLOCKS as u32 { // expiry_too_soon
+ break Some(("CLTV expiry is too close", 0x1000 | 14, Some(self.get_channel_update(chan).unwrap())));
+ }
+ if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
+ break Some(("CLTV expiry is too far in the future", 21, None));
+ }
+ break None;
+ }
+ {
+ let mut res = Vec::with_capacity(8 + 128);
+ if let Some(chan_update) = chan_update {
+ if code == 0x1000 | 11 || code == 0x1000 | 12 {
+ res.extend_from_slice(&byte_utils::be64_to_array(msg.amount_msat));
+ }
+ else if code == 0x1000 | 13 {
+ res.extend_from_slice(&byte_utils::be32_to_array(msg.cltv_expiry));
+ }
+ else if code == 0x1000 | 20 {
+ res.extend_from_slice(&byte_utils::be16_to_array(chan_update.contents.flags));
+ }
+ res.extend_from_slice(&chan_update.encode_with_len()[..]);
+ }
+ return_err!(err, code, &res[..]);
+ }
+ }
+ }
+
+ (pending_forward_info, channel_state.unwrap())
+ }
+
+ /// Only fails if the channel does not yet have an assigned short_id.
+ /// May be called with channel_state already locked!
++ fn get_channel_update(&self, chan: &Channel) -> Result<msgs::ChannelUpdate, LightningError> {
+ let short_channel_id = match chan.get_short_channel_id() {
- None => return Err(HandleError{err: "Channel not yet established", action: None}),
++ None => return Err(LightningError{err: "Channel not yet established", action: msgs::ErrorAction::IgnoreError}),
+ Some(id) => id,
+ };
+
+ let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_their_node_id().serialize()[..];
+
+ let unsigned = msgs::UnsignedChannelUpdate {
+ chain_hash: self.genesis_hash,
+ short_channel_id: short_channel_id,
+ timestamp: chan.get_channel_update_count(),
+ flags: (!were_node_one) as u16 | ((!chan.is_live() as u16) << 1),
+ cltv_expiry_delta: CLTV_EXPIRY_DELTA,
+ htlc_minimum_msat: chan.get_our_htlc_minimum_msat(),
+ fee_base_msat: chan.get_our_fee_base_msat(&*self.fee_estimator),
+ fee_proportional_millionths: chan.get_fee_proportional_millionths(),
+ excess_data: Vec::new(),
+ };
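+ // Per BOLT 7, the low bit of flags gives the direction (whether we are node_1) and the
+ // next bit marks the channel disabled, which we set whenever the channel is not live.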
+
+ let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
+ let sig = self.secp_ctx.sign(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
+
+ Ok(msgs::ChannelUpdate {
+ signature: sig,
+ contents: unsigned
+ })
+ }
+
+ /// Sends a payment along a given route.
+ ///
+ /// Value parameters are provided via the last hop in route, see documentation for RouteHop
+ /// fields for more info.
+ ///
+ /// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
+ /// payment), we don't do anything to stop you! We always try to ensure that if the provided
+ /// next hop knows the preimage to payment_hash they can claim an additional amount as
+ /// specified in the last hop in the route! Thus, you should probably do your own
+ /// payment_preimage tracking (which you should already be doing as they represent "proof of
+ /// payment") and prevent double-sends yourself.
+ ///
+ /// May generate a SendHTLCs message event on success, which should be relayed.
+ ///
+ /// Raises APIError::RouteError when an invalid route or forwarding parameter
+ /// (cltv_delta, fee, node public key) is specified.
+ /// Raises APIError::ChannelUnavailable if the next-hop channel is not available for updates
+ /// (including due to previous monitor update failure or new permanent monitor update failure).
+ /// Raises APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
+ /// relevant updates.
+ ///
+ /// In case of APIError::RouteError/APIError::ChannelUnavailable, the payment send has failed
+ /// and you may wish to retry via a different route immediately.
+ /// In case of APIError::MonitorUpdateFailed, the commitment update has been irrevocably
+ /// committed on our end and we're just waiting for a monitor update to send it. Do NOT retry
+ /// the payment via a different route unless you intend to pay twice!
+ pub fn send_payment(&self, route: Route, payment_hash: PaymentHash) -> Result<(), APIError> {
+ if route.hops.len() < 1 || route.hops.len() > 20 {
+ return Err(APIError::RouteError{err: "Route didn't go anywhere/had bogus size"});
+ }
+ let our_node_id = self.get_our_node_id();
+ for (idx, hop) in route.hops.iter().enumerate() {
+ if idx != route.hops.len() - 1 && hop.pubkey == our_node_id {
+ return Err(APIError::RouteError{err: "Route went through us but wasn't a simple rebalance loop to us"});
+ }
+ }
+
+ let session_priv = self.keys_manager.get_session_key();
+
+ let cur_height = self.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+
+ let onion_keys = secp_call!(onion_utils::construct_onion_keys(&self.secp_ctx, &route, &session_priv),
+ APIError::RouteError{err: "Pubkey along hop was maliciously selected"});
+ let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height)?;
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
+
+ let _ = self.total_consistency_lock.read().unwrap();
+
+ let err: Result<(), _> = loop {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+
+ let id = match channel_lock.short_to_id.get(&route.hops.first().unwrap().short_channel_id) {
+ None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!"}),
+ Some(id) => id.clone(),
+ };
+
+ let channel_state = channel_lock.borrow_parts();
+ if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
+ match {
+ if chan.get().get_their_node_id() != route.hops.first().unwrap().pubkey {
+ return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
+ }
+ if !chan.get().is_live() {
+ return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!"});
+ }
+ break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+ route: route.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: htlc_msat,
+ }, onion_packet), channel_state, chan)
+ } {
+ Some((update_add, commitment_signed, chan_monitor)) => {
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
+ // Note that MonitorUpdateFailed here indicates (per function docs)
+ // that we will resend the commitment update once monitor updating
+ // is unfrozen, so we have to take special care that we don't return
+ // something else in case we will resend later!
+ return Err(APIError::MonitorUpdateFailed);
+ }
+
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: route.hops.first().unwrap().pubkey,
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: vec![update_add],
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed,
+ },
+ });
+ },
+ None => {},
+ }
+ } else { unreachable!(); }
+ return Ok(());
+ };
+
+ match handle_error!(self, err) {
+ Ok(_) => unreachable!(),
+ Err(e) => {
- if let Some(msgs::ErrorAction::IgnoreError) = e.action {
++ if let msgs::ErrorAction::IgnoreError = e.action {
+ } else {
+ log_error!(self, "Got bad keys: {}!", e.err);
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: route.hops.first().unwrap().pubkey,
+ action: e.action,
+ });
+ }
+ Err(APIError::ChannelUnavailable { err: e.err })
+ },
+ }
+ }
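+ // An illustrative caller-side sketch of the contract documented above (`route` and
+ // `payment_hash` are assumed to come from the Router and an invoice respectively):
+ //
+ //   match channel_manager.send_payment(route, payment_hash) {
+ //       Ok(()) => {}, // In flight; await a PaymentSent or PaymentFailed event.
+ //       Err(APIError::RouteError { .. }) |
+ //       Err(APIError::ChannelUnavailable { .. }) => {
+ //           // Send failed outright; safe to retry over a different route.
+ //       },
+ //       Err(APIError::MonitorUpdateFailed) => {
+ //           // Already committed; do NOT retry or the recipient may be paid twice.
+ //       },
+ //       Err(_) => {},
+ //   }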
+
+ /// Call this upon creation of a funding transaction for the given channel.
+ ///
+ /// Note that ALL inputs in the transaction pointed to by funding_txo MUST spend SegWit outputs
+ /// or your counterparty can steal your funds!
+ ///
+ /// Panics if a funding transaction has already been provided for this channel.
+ ///
+ /// May panic if the funding_txo is duplicative with some other channel (note that this should
+ /// be trivially prevented by using unique funding transaction keys per-channel).
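+ ///
+ /// Illustrative flow (a sketch; `funding_tx` is built by your wallet against the
+ /// output_script from the FundingGenerationReady event, and `output_index` is the
+ /// index of that output within it):
+ /// ```ignore
+ /// let funding_txo = OutPoint::new(funding_tx.txid(), output_index);
+ /// channel_manager.funding_transaction_generated(&temporary_channel_id, funding_txo);
+ /// // Only broadcast funding_tx once the FundingBroadcastSafe event fires.
+ /// ```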
+ pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_txo: OutPoint) {
+ let _ = self.total_consistency_lock.read().unwrap();
+
+ let (mut chan, msg, chan_monitor) = {
+ let (res, chan) = {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ match channel_state.by_id.remove(temporary_channel_id) {
+ Some(mut chan) => {
+ (chan.get_outbound_funding_created(funding_txo)
+ .map_err(|e| if let ChannelError::Close(msg) = e {
+ MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.force_shutdown(), None)
+ } else { unreachable!(); })
+ , chan)
+ },
+ None => return
+ }
+ };
+ match handle_error!(self, res) {
+ Ok(funding_msg) => {
+ (chan, funding_msg.0, funding_msg.1)
+ },
+ Err(e) => {
+ log_error!(self, "Got bad signatures: {}!", e.err);
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: chan.get_their_node_id(),
+ action: e.action,
+ });
+ return;
+ },
+ }
+ };
+ // Because we have exclusive ownership of the channel here we can release the channel_state
+ // lock before add_update_monitor
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ match e {
+ ChannelMonitorUpdateErr::PermanentFailure => {
+ match handle_error!(self, Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", *temporary_channel_id, chan.force_shutdown(), None))) {
+ Err(e) => {
+ log_error!(self, "Failed to store ChannelMonitor update for funding tx generation");
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: chan.get_their_node_id(),
+ action: e.action,
+ });
+ return;
+ },
+ Ok(()) => unreachable!(),
+ }
+ },
+ ChannelMonitorUpdateErr::TemporaryFailure => {
+ // It's completely fine to continue with a FundingCreated until the monitor
+ // update is persisted, as long as we don't generate the FundingBroadcastSafe
+ // until the monitor has been safely persisted (as funding broadcast is not,
+ // in fact, safe).
+ chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
+ },
+ }
+ }
+
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
+ node_id: chan.get_their_node_id(),
+ msg: msg,
+ });
+ match channel_state.by_id.entry(chan.channel_id()) {
+ hash_map::Entry::Occupied(_) => {
+ panic!("Generated duplicate funding txid?");
+ },
+ hash_map::Entry::Vacant(e) => {
+ e.insert(chan);
+ }
+ }
+ }
+
+ fn get_announcement_sigs(&self, chan: &Channel) -> Option<msgs::AnnouncementSignatures> {
+ if !chan.should_announce() { return None }
+
+ let (announcement, our_bitcoin_sig) = match chan.get_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone()) {
+ Ok(res) => res,
+ Err(_) => return None, // Only in case of state precondition violations eg channel is closing
+ };
+ let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
+ let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
+
+ Some(msgs::AnnouncementSignatures {
+ channel_id: chan.channel_id(),
+ short_channel_id: chan.get_short_channel_id().unwrap(),
+ node_signature: our_node_sig,
+ bitcoin_signature: our_bitcoin_sig,
+ })
+ }
+
+ /// Processes HTLCs which are pending, waiting on a random forward delay.
+ ///
+ /// Should only really ever be called in response to a PendingHTLCsForwardable event.
+ /// Will likely generate further events.
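+ ///
+ /// A sketch of typical event-loop handling (`schedule` is a hypothetical timer in the
+ /// caller's runtime):
+ /// ```ignore
+ /// if let Event::PendingHTLCsForwardable { time_forwardable } = event {
+ ///     schedule(time_forwardable, || channel_manager.process_pending_htlc_forwards());
+ /// }
+ /// ```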
+ pub fn process_pending_htlc_forwards(&self) {
+ let _ = self.total_consistency_lock.read().unwrap();
+
+ let mut new_events = Vec::new();
+ let mut failed_forwards = Vec::new();
+ let mut handle_errors = Vec::new();
+ {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+
+ for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
+ if short_chan_id != 0 {
+ let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
+ Some(chan_id) => chan_id.clone(),
+ None => {
+ failed_forwards.reserve(pending_forwards.len());
+ for forward_info in pending_forwards.drain(..) {
+ match forward_info {
+ HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+ });
+ failed_forwards.push((htlc_source, forward_info.payment_hash, 0x4000 | 10, None));
+ },
+ HTLCForwardInfo::FailHTLC { .. } => {
+ // Channel went away before we could fail it. This implies
+ // the channel is now on chain and our counterparty is
+ // trying to broadcast the HTLC-Timeout, but that's their
+ // problem, not ours.
+ }
+ }
+ }
+ continue;
+ }
+ };
+ if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
+ let mut add_htlc_msgs = Vec::new();
+ let mut fail_htlc_msgs = Vec::new();
+ for forward_info in pending_forwards.drain(..) {
+ match forward_info {
+ HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+ log_trace!(self, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", log_bytes!(forward_info.payment_hash.0), prev_short_channel_id, short_chan_id);
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+ });
+ match chan.get_mut().send_htlc(forward_info.amt_to_forward, forward_info.payment_hash, forward_info.outgoing_cltv_value, htlc_source.clone(), forward_info.onion_packet.unwrap()) {
+ Err(e) => {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(self, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(forward_info.payment_hash.0), msg);
+ } else {
+ panic!("Stated return value requirements in send_htlc() were not met");
+ }
+ let chan_update = self.get_channel_update(chan.get()).unwrap();
+ failed_forwards.push((htlc_source, forward_info.payment_hash, 0x1000 | 7, Some(chan_update)));
+ continue;
+ },
+ Ok(update_add) => {
+ match update_add {
+ Some(msg) => { add_htlc_msgs.push(msg); },
+ None => {
+ // Nothing to do here...we're waiting on a remote
+ // revoke_and_ack before we can add any more HTLCs. The Channel
+ // will automatically handle building the update_add_htlc and
+ // commitment_signed messages when we can.
+ // TODO: Do some kind of timer to set the channel as !is_live()
+ // as we don't really want others relying on us relaying through
+ // this channel currently :/.
+ }
+ }
+ }
+ }
+ },
+ HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+ log_trace!(self, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
+ match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
+ Err(e) => {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(self, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
+ } else {
+ panic!("Stated return value requirements in get_update_fail_htlc() were not met");
+ }
+ // fail-backs are best-effort, we probably already have one
+ // pending, and if not that's OK; if the channel is gone, it's
+ // on the chain and sending the HTLC-Timeout is their problem.
+ continue;
+ },
+ Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
+ Ok(None) => {
+ // Nothing to do here...we're waiting on a remote
+ // revoke_and_ack before we can update the commitment
+ // transaction. The Channel will automatically handle
+ // building the update_fail_htlc and commitment_signed
+ // messages when we can.
+ // We don't need any kind of timer here as they should fail
+ // the channel onto the chain if they can't get our
+ // update_fail_htlc in time, it's not our problem.
+ }
+ }
+ },
+ }
+ }
+
+ if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
+ let (commitment_msg, monitor) = match chan.get_mut().send_commitment() {
+ Ok(res) => res,
+ Err(e) => {
- if let ChannelError::Ignore(_) = e {
- panic!("Stated return value requirements in send_commitment() were not met");
- }
- //TODO: Handle...this is bad!
- continue;
- },
++ // We surely failed send_commitment due to bad keys, in that case
++ // close channel and then send error message to peer.
++ let their_node_id = chan.get().get_their_node_id();
++ let err: Result<(), _> = match e {
++ ChannelError::Ignore(_) => {
++ panic!("Stated return value requirements in send_commitment() were not met");
++ },
++ ChannelError::Close(msg) => {
++ log_trace!(self, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
++ let (channel_id, mut channel) = chan.remove_entry();
++ if let Some(short_id) = channel.get_short_channel_id() {
++ channel_state.short_to_id.remove(&short_id);
++ }
++ Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, channel.force_shutdown(), self.get_channel_update(&channel).ok()))
++ },
++ ChannelError::CloseDelayBroadcast { .. } => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
++ };
++ match handle_error!(self, err) {
++ Ok(_) => unreachable!(),
++ Err(e) => {
++ match e.action {
++ msgs::ErrorAction::IgnoreError => {},
++ _ => {
++ log_error!(self, "Got bad keys: {}!", e.err);
++ let mut channel_state = self.channel_state.lock().unwrap();
++ channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
++ node_id: their_node_id,
++ action: e.action,
++ });
++ },
++ }
++ continue;
++ },
+ }
++ }
+ };
+ if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+ handle_errors.push((chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
+ continue;
+ }
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get().get_their_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: add_htlc_msgs,
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: fail_htlc_msgs,
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed: commitment_msg,
+ },
+ });
+ }
+ } else {
+ unreachable!();
+ }
+ } else {
+ for forward_info in pending_forwards.drain(..) {
+ match forward_info {
+ HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info } => {
+ let prev_hop_data = HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: forward_info.incoming_shared_secret,
+ };
+ match channel_state.claimable_htlcs.entry(forward_info.payment_hash) {
+ hash_map::Entry::Occupied(mut entry) => entry.get_mut().push((forward_info.amt_to_forward, prev_hop_data)),
+ hash_map::Entry::Vacant(entry) => { entry.insert(vec![(forward_info.amt_to_forward, prev_hop_data)]); },
+ };
+ new_events.push(events::Event::PaymentReceived {
+ payment_hash: forward_info.payment_hash,
+ amt: forward_info.amt_to_forward,
+ });
+ },
+ HTLCForwardInfo::FailHTLC { .. } => {
+ panic!("Got pending fail of our own HTLC");
+ }
+ }
+ }
+ }
+ }
+ }
+
+ for (htlc_source, payment_hash, failure_code, update) in failed_forwards.drain(..) {
+ match update {
+ None => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: Vec::new() }),
+ Some(chan_update) => self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code, data: chan_update.encode_with_len() }),
+ };
+ }
+
+ for (their_node_id, err) in handle_errors.drain(..) {
+ match handle_error!(self, err) {
+ Ok(_) => {},
+ Err(e) => {
- if let Some(msgs::ErrorAction::IgnoreError) = e.action {
++ if let msgs::ErrorAction::IgnoreError = e.action {
+ } else {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: their_node_id,
+ action: e.action,
+ });
+ }
+ },
+ }
+ }
+
+ if new_events.is_empty() { return }
+ let mut events = self.pending_events.lock().unwrap();
+ events.append(&mut new_events);
+ }
+
+ /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
+ /// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources
+ /// along the path (including in our own channel on which we received it).
+ /// Returns false if no payment was found to fail backwards, true if the process of failing the
+ /// HTLC backwards has been started.
+ pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool {
+ let _ = self.total_consistency_lock.read().unwrap();
+
+ let mut channel_state = Some(self.channel_state.lock().unwrap());
+ let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
+ if let Some(mut sources) = removed_source {
+ for (recvd_value, htlc_with_hash) in sources.drain(..) {
+ if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
+ self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
+ HTLCSource::PreviousHopData(htlc_with_hash), payment_hash,
+ HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: byte_utils::be64_to_array(recvd_value).to_vec() });
+ }
+ true
+ } else { false }
+ }
+
+ /// Fails an HTLC backwards to the sender of it to us.
+ /// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
+ /// There are several callsites that do stupid things like loop over a list of payment_hashes
+ /// to fail and take the channel_state lock for each iteration (as we take ownership and may
+ /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
+ /// still-available channels.
+ fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
+ //TODO: There is a timing attack here where if a node fails an HTLC back to us they can
+ //identify whether we sent it or not based on the (I presume) very different runtime
+ //between the branches here. We should make this async and move it into the forward HTLCs
+ //timer handling.
+ match source {
+ HTLCSource::OutboundRoute { ref route, .. } => {
+ log_trace!(self, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
+ mem::drop(channel_state_lock);
+ match &onion_error {
- &HTLCFailReason::ErrorPacket { ref err } => {
++ &HTLCFailReason::LightningError { ref err } => {
+#[cfg(test)]
+ let (channel_update, payment_retryable, onion_error_code) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+#[cfg(not(test))]
+ let (channel_update, payment_retryable, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
+ // TODO: If we decided to blame ourselves (or one of our channels) in
+ // process_onion_failure we should close that channel as it implies our
+ // next-hop is needlessly blaming us!
+ if let Some(update) = channel_update {
+ self.channel_state.lock().unwrap().pending_msg_events.push(
+ events::MessageSendEvent::PaymentFailureNetworkUpdate {
+ update,
+ }
+ );
+ }
+ self.pending_events.lock().unwrap().push(
+ events::Event::PaymentFailed {
+ payment_hash: payment_hash.clone(),
+ rejected_by_dest: !payment_retryable,
+#[cfg(test)]
+ error_code: onion_error_code
+ }
+ );
+ },
+ &HTLCFailReason::Reason {
+#[cfg(test)]
+ ref failure_code,
+ .. } => {
+ // we get a fail_malformed_htlc from the first hop
+ // TODO: We'd like to generate a PaymentFailureNetworkUpdate for temporary
+ // failures here, but that would be insufficient as Router::get_route
+ // generally ignores its view of our own channels as we provide them via
+ // ChannelDetails.
+ // TODO: For non-temporary failures, we really should be closing the
+ // channel here as we apparently can't relay through them anyway.
+ self.pending_events.lock().unwrap().push(
+ events::Event::PaymentFailed {
+ payment_hash: payment_hash.clone(),
+ rejected_by_dest: route.hops.len() == 1,
+#[cfg(test)]
+ error_code: Some(*failure_code),
+ }
+ );
+ }
+ }
+ },
+ HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret }) => {
+ let err_packet = match onion_error {
+ HTLCFailReason::Reason { failure_code, data } => {
+ log_trace!(self, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
+ let packet = onion_utils::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
+ onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
+ },
- HTLCFailReason::ErrorPacket { err } => {
- log_trace!(self, "Failing HTLC with payment_hash {} backwards with pre-built ErrorPacket", log_bytes!(payment_hash.0));
++ HTLCFailReason::LightningError { err } => {
++ log_trace!(self, "Failing HTLC with payment_hash {} backwards with pre-built LightningError", log_bytes!(payment_hash.0));
+ onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data)
+ }
+ };
+
+ let mut forward_event = None;
+ if channel_state_lock.forward_htlcs.is_empty() {
+ forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
+ }
+ match channel_state_lock.forward_htlcs.entry(short_channel_id) {
+ hash_map::Entry::Occupied(mut entry) => {
+ entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet });
+ },
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }));
+ }
+ }
+ mem::drop(channel_state_lock);
+ if let Some(time) = forward_event {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::PendingHTLCsForwardable {
+ time_forwardable: time
+ });
+ }
+ },
+ }
+ }
+
+ /// Provides a payment preimage in response to a PaymentReceived event, returning true and
+ /// generating message events for the net layer to claim the payment, if possible. Thus, you
+ /// should probably kick the net layer to go send messages if this returns true!
+ ///
+ /// May panic if called except in response to a PaymentReceived event.
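+ ///
+ /// Illustrative handling of a PaymentReceived event (a sketch; `preimage_for` is a
+ /// hypothetical lookup into the caller's own payment store):
+ /// ```ignore
+ /// if let Event::PaymentReceived { payment_hash, amt } = event {
+ ///     match preimage_for(&payment_hash, amt) {
+ ///         Some(payment_preimage) => { channel_manager.claim_funds(payment_preimage); },
+ ///         None => { channel_manager.fail_htlc_backwards(&payment_hash); },
+ ///     }
+ /// }
+ /// ```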
+ pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool {
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
+
+ let _ = self.total_consistency_lock.read().unwrap();
+
+ let mut channel_state = Some(self.channel_state.lock().unwrap());
+ let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
+ if let Some(mut sources) = removed_source {
+ // TODO: We should require the user specify the expected amount so that we can claim
+ // only payments for the correct amount, and reject payments for incorrect amounts
+ // (which are probably middle nodes probing to break our privacy).
+ for (_, htlc_with_hash) in sources.drain(..) {
+ if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
+ self.claim_funds_internal(channel_state.take().unwrap(), HTLCSource::PreviousHopData(htlc_with_hash), payment_preimage);
+ }
+ true
+ } else { false }
+ }
+ fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: PaymentPreimage) {
+ let (their_node_id, err) = loop {
+ match source {
+ HTLCSource::OutboundRoute { .. } => {
+ mem::drop(channel_state_lock);
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::PaymentSent {
+ payment_preimage
+ });
+ },
+ HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, .. }) => {
+ //TODO: Delay the claimed_funds relaying just like we do outbound relay!
+ let channel_state = channel_state_lock.borrow_parts();
+
+ let chan_id = match channel_state.short_to_id.get(&short_channel_id) {
+ Some(chan_id) => chan_id.clone(),
+ None => {
+ // TODO: There is probably a channel manager somewhere that needs to
+ // learn the preimage as the channel already hit the chain and that's
+ // why it's missing.
+ return
+ }
+ };
+
+ if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
+ let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+ match chan.get_mut().get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage) {
+ Ok((msgs, monitor_option)) => {
+ if let Some(chan_monitor) = monitor_option {
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ if was_frozen_for_monitor {
+ assert!(msgs.is_none());
+ } else {
+ break (chan.get().get_their_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()));
+ }
+ }
+ }
+ if let Some((msg, commitment_signed)) = msgs {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get().get_their_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: vec![msg],
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed,
+ }
+ });
+ }
+ },
+ Err(_e) => {
+ // TODO: There is probably a channel manager somewhere that needs to
+ // learn the preimage as the channel may be about to hit the chain.
+ //TODO: Do something with e?
+ return
+ },
+ }
+ } else { unreachable!(); }
+ },
+ }
+ return;
+ };
+
+ match handle_error!(self, err) {
+ Ok(_) => {},
+ Err(e) => {
- if let Some(msgs::ErrorAction::IgnoreError) = e.action {
++ if let msgs::ErrorAction::IgnoreError = e.action {
+ } else {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: their_node_id,
+ action: e.action,
+ });
+ }
+ },
+ }
+ }
+
+ /// Gets the node_id held by this ChannelManager
+ pub fn get_our_node_id(&self) -> PublicKey {
+ PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key)
+ }
+
+ /// Used to restore channels to normal operation after a
+ /// ChannelMonitorUpdateErr::TemporaryFailure was returned from a channel monitor update
+ /// operation.
+ pub fn test_restore_channel_monitor(&self) {
+ let mut close_results = Vec::new();
+ let mut htlc_forwards = Vec::new();
+ let mut htlc_failures = Vec::new();
+ let mut pending_events = Vec::new();
+ let _ = self.total_consistency_lock.read().unwrap();
+
+ {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ let short_to_id = channel_state.short_to_id;
+ let pending_msg_events = channel_state.pending_msg_events;
+ channel_state.by_id.retain(|_, channel| {
+ if channel.is_awaiting_monitor_update() {
+ let chan_monitor = channel.channel_monitor();
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ match e {
+ ChannelMonitorUpdateErr::PermanentFailure => {
+ // TODO: There may be some pending HTLCs that we intended to fail
+ // backwards when a monitor update failed. We should make sure
+ // knowledge of those gets moved into the appropriate in-memory
+ // ChannelMonitor and they get failed backwards once we get
+ // on-chain confirmations.
+ // Note I think #198 addresses this, so once it's merged a test
+ // should be written.
+ if let Some(short_id) = channel.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ close_results.push(channel.force_shutdown());
+ if let Ok(update) = self.get_channel_update(&channel) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ false
+ },
+ ChannelMonitorUpdateErr::TemporaryFailure => true,
+ }
+ } else {
+ let (raa, commitment_update, order, pending_forwards, mut pending_failures, needs_broadcast_safe, funding_locked) = channel.monitor_updating_restored();
+ if !pending_forwards.is_empty() {
+ htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), pending_forwards));
+ }
+ htlc_failures.append(&mut pending_failures);
+
+ macro_rules! handle_cs { () => {
+ if let Some(update) = commitment_update {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: channel.get_their_node_id(),
+ updates: update,
+ });
+ }
+ } }
+ macro_rules! handle_raa { () => {
+ if let Some(revoke_and_ack) = raa {
+ pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ node_id: channel.get_their_node_id(),
+ msg: revoke_and_ack,
+ });
+ }
+ } }
+ match order {
+ RAACommitmentOrder::CommitmentFirst => {
+ handle_cs!();
+ handle_raa!();
+ },
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ handle_raa!();
+ handle_cs!();
+ },
+ }
+ if needs_broadcast_safe {
+ pending_events.push(events::Event::FundingBroadcastSafe {
+ funding_txo: channel.get_funding_txo().unwrap(),
+ user_channel_id: channel.get_user_id(),
+ });
+ }
+ if let Some(msg) = funding_locked {
+ pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+ node_id: channel.get_their_node_id(),
+ msg,
+ });
+ if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
+ pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: channel.get_their_node_id(),
+ msg: announcement_sigs,
+ });
+ }
+ short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
+ }
+ true
+ }
+ } else { true }
+ });
+ }
+
+ self.pending_events.lock().unwrap().append(&mut pending_events);
+
+ for failure in htlc_failures.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+ }
+ self.forward_htlcs(&mut htlc_forwards[..]);
+
+ for res in close_results.drain(..) {
+ self.finish_force_close_channel(res);
+ }
+ }
+
+ fn internal_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
+ if msg.chain_hash != self.genesis_hash {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash", msg.temporary_channel_id.clone()));
+ }
+
+ let channel = Channel::new_from_req(&*self.fee_estimator, &self.keys_manager, their_node_id.clone(), their_local_features, msg, 0, Arc::clone(&self.logger), &self.default_configuration)
+ .map_err(|e| MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id))?;
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+ match channel_state.by_id.entry(channel.channel_id()) {
+ hash_map::Entry::Occupied(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!", msg.temporary_channel_id.clone())),
+ hash_map::Entry::Vacant(entry) => {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+ node_id: their_node_id.clone(),
+ msg: channel.get_accept_channel(),
+ });
+ entry.insert(channel);
+ }
+ }
+ Ok(())
+ }
+
+ fn internal_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
+ let (value, output_script, user_id) = {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.temporary_channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: see issue #153, need a consistent behavior on obnoxious behavior from random node
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
+ }
+ try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration, their_local_features), channel_state, chan);
+ (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
+ },
+ //TODO: same as above
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
+ }
+ };
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::FundingGenerationReady {
+ temporary_channel_id: msg.temporary_channel_id,
+ channel_value_satoshis: value,
+ output_script: output_script,
+ user_channel_id: user_id,
+ });
+ Ok(())
+ }
+
+ fn internal_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
+ let ((funding_msg, monitor_update), mut chan) = {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.temporary_channel_id));
+ }
+ (try_chan_entry!(self, chan.get_mut().funding_created(msg), channel_state, chan), chan.remove())
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.temporary_channel_id))
+ }
+ };
+ // Because we have exclusive ownership of the channel here we can release the channel_state
+ // lock before add_update_monitor
+ if let Err(e) = self.monitor.add_update_monitor(monitor_update.get_funding_txo().unwrap(), monitor_update) {
+ match e {
+ ChannelMonitorUpdateErr::PermanentFailure => {
+ // Note that we reply with the new channel_id in error messages if we gave up on the
+ // channel, not the temporary_channel_id. This is compatible with ourselves, but the
+ // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
+ // any messages referencing a previously-closed channel anyway.
+ return Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure", funding_msg.channel_id, chan.force_shutdown(), None));
+ },
+ ChannelMonitorUpdateErr::TemporaryFailure => {
+ // There's no problem signing a counterparty's funding transaction if our monitor
+ // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
+ // accepted payment from yet. We do, however, need to wait to send our funding_locked
+ // until we have persisted our monitor.
+ chan.monitor_update_failed(false, false, Vec::new(), Vec::new());
+ },
+ }
+ }
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+ match channel_state.by_id.entry(funding_msg.channel_id) {
+ hash_map::Entry::Occupied(_) => {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id", funding_msg.channel_id))
+ },
+ hash_map::Entry::Vacant(e) => {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
+ node_id: their_node_id.clone(),
+ msg: funding_msg,
+ });
+ e.insert(chan);
+ }
+ }
+ Ok(())
+ }
+
+ fn internal_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
+ let (funding_txo, user_id) = {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ let chan_monitor = try_chan_entry!(self, chan.get_mut().funding_signed(&msg), channel_state, chan);
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
+ }
+ (chan.get().get_funding_txo().unwrap(), chan.get().get_user_id())
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ };
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::FundingBroadcastSafe {
+ funding_txo: funding_txo,
+ user_channel_id: user_id,
+ });
+ Ok(())
+ }
+
+ fn internal_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ try_chan_entry!(self, chan.get_mut().funding_locked(&msg), channel_state, chan);
+ if let Some(announcement_sigs) = self.get_announcement_sigs(chan.get()) {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: their_node_id.clone(),
+ msg: announcement_sigs,
+ });
+ }
+ Ok(())
+ },
+ hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ }
+
+ fn internal_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
+ let (mut dropped_htlcs, chan_option) = {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+
+ match channel_state.by_id.entry(msg.channel_id.clone()) {
+ hash_map::Entry::Occupied(mut chan_entry) => {
+ if chan_entry.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ let (shutdown, closing_signed, dropped_htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&*self.fee_estimator, &msg), channel_state, chan_entry);
+ if let Some(msg) = shutdown {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: their_node_id.clone(),
+ msg,
+ });
+ }
+ if let Some(msg) = closing_signed {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+ node_id: their_node_id.clone(),
+ msg,
+ });
+ }
+ if chan_entry.get().is_shutdown() {
+ if let Some(short_id) = chan_entry.get().get_short_channel_id() {
+ channel_state.short_to_id.remove(&short_id);
+ }
+ (dropped_htlcs, Some(chan_entry.remove_entry().1))
+ } else { (dropped_htlcs, None) }
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ };
+ for htlc_source in dropped_htlcs.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ }
+ if let Some(chan) = chan_option {
+ if let Ok(update) = self.get_channel_update(&chan) {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ }
+ Ok(())
+ }
+
+ fn internal_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
+ let (tx, chan_option) = {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id.clone()) {
+ hash_map::Entry::Occupied(mut chan_entry) => {
+ if chan_entry.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&*self.fee_estimator, &msg), channel_state, chan_entry);
+ if let Some(msg) = closing_signed {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+ node_id: their_node_id.clone(),
+ msg,
+ });
+ }
+ if tx.is_some() {
+ // We're done with this channel, we've got a signed closing transaction and
+ // will send the closing_signed back to the remote peer upon return. This
+ // also implies there are no pending HTLCs left on the channel, so we can
+ // fully delete it from tracking (the channel monitor is still around to
+ // watch for old state broadcasts)!
+ if let Some(short_id) = chan_entry.get().get_short_channel_id() {
+ channel_state.short_to_id.remove(&short_id);
+ }
+ (tx, Some(chan_entry.remove_entry().1))
+ } else { (tx, None) }
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ };
+ if let Some(broadcast_tx) = tx {
+ self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
+ }
+ if let Some(chan) = chan_option {
+ if let Ok(update) = self.get_channel_update(&chan) {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ }
+ Ok(())
+ }
+
+ fn internal_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
+ //TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
+ //determine the state of the payment based on our response/if we forward anything/the time
+ //we take to respond. We should take care to avoid allowing such an attack.
+ //
+ //TODO: There exists a further attack where a node may garble the onion data, forward it to
+ //us repeatedly garbled in different ways, and compare our error messages, which are
+ //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
+ //but we should prevent it anyway.
+
+ let (mut pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
+ let channel_state = channel_state_lock.borrow_parts();
+
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ if !chan.get().is_usable() {
+ // If the update_add is completely bogus, the call will Err and we will close,
+ // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+ // want to reject the new HTLC and fail it backwards instead of forwarding.
+ if let PendingHTLCStatus::Forward(PendingForwardHTLCInfo { incoming_shared_secret, .. }) = pending_forward_info {
+ let chan_update = self.get_channel_update(chan.get());
+ pending_forward_info = PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ reason: if let Ok(update) = chan_update {
+ // TODO: Note that |20 is defined as "channel FROM the processing
+ // node has been disabled" (emphasis mine), which seems to imply
+ // that we can't return |20 for an inbound channel being disabled.
+ // This probably needs a spec update but should definitely be
+ // allowed.
+ onion_utils::build_first_hop_failure_packet(&incoming_shared_secret, 0x1000|20, &{
+ let mut res = Vec::with_capacity(8 + 128);
+ res.extend_from_slice(&byte_utils::be16_to_array(update.contents.flags));
+ res.extend_from_slice(&update.encode_with_len()[..]);
+ res
+ }[..])
+ } else {
+ // This can only happen if the channel isn't in the fully-funded
+ // state yet, implying our counterparty is trying to route payments
+ // over the channel back to themselves (because no one else should
+ // know the short_id is a lightning channel yet). We should have no
+ // problem just calling this unknown_next_peer
+ onion_utils::build_first_hop_failure_packet(&incoming_shared_secret, 0x4000|10, &[])
+ },
+ }));
+ }
+ }
+ try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info), channel_state, chan);
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ Ok(())
+ }
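
The failure data constructed above follows a fixed layout: two big-endian bytes of the channel_update's flags, then the full length-prefixed channel_update encoding. Below is a minimal self-contained sketch of that layout, with be16_to_array reimplemented locally and encoded_update standing in for update.encode_with_len():

fn be16_to_array(v: u16) -> [u8; 2] {
    [(v >> 8) as u8, (v & 0xff) as u8]
}

fn disabled_channel_failure_data(flags: u16, encoded_update: &[u8]) -> Vec<u8> {
    // Two big-endian flag bytes, then the length-prefixed channel_update.
    let mut res = Vec::with_capacity(2 + encoded_update.len());
    res.extend_from_slice(&be16_to_array(flags));
    res.extend_from_slice(encoded_update);
    res
}
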
+
+ fn internal_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let htlc_source = {
+ let channel_state = channel_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan)
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ };
+ self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone());
+ Ok(())
+ }
+
+ fn internal_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
- try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::ErrorPacket { err: msg.reason.clone() }), channel_state, chan);
++ try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan);
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ Ok(())
+ }
+
+ fn internal_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ if (msg.failure_code & 0x8000) == 0 {
+ try_chan_entry!(self, Err(ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set")), channel_state, chan);
+ }
+ try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan);
+ Ok(())
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ }
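
The BADONION check above relies on the BOLT 4 failure-code flag bits. A short sketch naming those bits (the constant names here are local and illustrative, not the crate's):

// Illustrative BOLT 4 failure-code flag bits.
const BADONION: u16 = 0x8000; // sending node could not parse the onion
const PERM: u16 = 0x4000;     // permanent failure
const UPDATE: u16 = 0x1000;   // failure data carries a channel_update

fn is_plausible_malformed_code(failure_code: u16) -> bool {
    // Mirrors the check above: update_fail_malformed_htlc must set BADONION.
    (failure_code & BADONION) != 0
}
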
+
+ fn internal_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ let (revoke_and_ack, commitment_signed, closing_signed, chan_monitor) =
+ try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &*self.fee_estimator), channel_state, chan);
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
+ //TODO: Rebroadcast closing_signed if present on monitor update restoration
+ }
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ node_id: their_node_id.clone(),
+ msg: revoke_and_ack,
+ });
+ if let Some(msg) = commitment_signed {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: their_node_id.clone(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed: msg,
+ },
+ });
+ }
+ if let Some(msg) = closing_signed {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+ node_id: their_node_id.clone(),
+ msg,
+ });
+ }
+ Ok(())
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ }
+
+ #[inline]
+ fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, Vec<(PendingForwardHTLCInfo, u64)>)]) {
+ for &mut (prev_short_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
+ let mut forward_event = None;
+ if !pending_forwards.is_empty() {
+ let mut channel_state = self.channel_state.lock().unwrap();
+ if channel_state.forward_htlcs.is_empty() {
+ forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
+ }
+ for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
+ match channel_state.forward_htlcs.entry(forward_info.short_channel_id) {
+ hash_map::Entry::Occupied(mut entry) => {
+ entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info });
+ },
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(vec!(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info }));
+ }
+ }
+ }
+ }
+ match forward_event {
+ Some(time) => {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::PendingHTLCsForwardable {
+ time_forwardable: time
+ });
+ }
+ None => {},
+ }
+ }
+ }
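
forward_htlcs batches HTLCs per outgoing short_channel_id using the entry API, creating the Vec on first use. A stripped-down sketch of the same accumulation pattern, with u64/u32 stand-ins for the real key and forward-info types:

use std::collections::{hash_map, HashMap};

fn queue_forward(forwards: &mut HashMap<u64, Vec<u32>>, short_channel_id: u64, htlc_id: u32) {
    match forwards.entry(short_channel_id) {
        hash_map::Entry::Occupied(mut entry) => entry.get_mut().push(htlc_id),
        hash_map::Entry::Vacant(entry) => { entry.insert(vec![htlc_id]); },
    }
}
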
+
+ fn internal_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
+ let (pending_forwards, mut pending_failures, short_channel_id) = {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
+ let (commitment_update, pending_forwards, pending_failures, closing_signed, chan_monitor) =
+ try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &*self.fee_estimator), channel_state, chan);
+ if let Err(e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ if was_frozen_for_monitor {
+ assert!(commitment_update.is_none() && closing_signed.is_none() && pending_forwards.is_empty() && pending_failures.is_empty());
+ return Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA"));
+ } else {
+ return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, commitment_update.is_some(), pending_forwards, pending_failures);
+ }
+ }
+ if let Some(updates) = commitment_update {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: their_node_id.clone(),
+ updates,
+ });
+ }
+ if let Some(msg) = closing_signed {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+ node_id: their_node_id.clone(),
+ msg,
+ });
+ }
+ (pending_forwards, pending_failures, chan.get().get_short_channel_id().expect("RAA should only work on a short-id-available channel"))
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ };
+ for failure in pending_failures.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+ }
+ self.forward_htlcs(&mut [(short_channel_id, pending_forwards)]);
+
+ Ok(())
+ }
+
+ fn internal_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ //TODO: here and below MsgHandleErrInternal, #153 case
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ try_chan_entry!(self, chan.get_mut().update_fee(&*self.fee_estimator, &msg), channel_state, chan);
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ Ok(())
+ }
+
+ fn internal_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ if !chan.get().is_usable() {
- return Err(MsgHandleErrInternal::from_no_close(HandleError{err: "Got an announcement_signatures before we were ready for it", action: Some(msgs::ErrorAction::IgnoreError)}));
++ return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it", action: msgs::ErrorAction::IgnoreError}));
+ }
+
+ let our_node_id = self.get_our_node_id();
+ let (announcement, our_bitcoin_sig) =
+ try_chan_entry!(self, chan.get_mut().get_channel_announcement(our_node_id.clone(), self.genesis_hash.clone()), channel_state, chan);
+
+ let were_node_one = announcement.node_id_1 == our_node_id;
+ let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
+ if self.secp_ctx.verify(&msghash, &msg.node_signature, if were_node_one { &announcement.node_id_2 } else { &announcement.node_id_1 }).is_err() ||
+ self.secp_ctx.verify(&msghash, &msg.bitcoin_signature, if were_node_one { &announcement.bitcoin_key_2 } else { &announcement.bitcoin_key_1 }).is_err() {
+ try_chan_entry!(self, Err(ChannelError::Close("Bad announcement_signatures node_signature")), channel_state, chan);
+ }
+
+ let our_node_sig = self.secp_ctx.sign(&msghash, &self.our_network_key);
+
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+ msg: msgs::ChannelAnnouncement {
+ node_signature_1: if were_node_one { our_node_sig } else { msg.node_signature },
+ node_signature_2: if were_node_one { msg.node_signature } else { our_node_sig },
+ bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { msg.bitcoin_signature },
+ bitcoin_signature_2: if were_node_one { msg.bitcoin_signature } else { our_bitcoin_sig },
+ contents: announcement,
+ },
+ update_msg: self.get_channel_update(chan.get()).unwrap(), // can only fail if we're not in a ready state
+ });
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ Ok(())
+ }
+
+ fn internal_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_their_node_id() != *their_node_id {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!", msg.channel_id));
+ }
+ let (funding_locked, revoke_and_ack, commitment_update, channel_monitor, mut order, shutdown) =
+ try_chan_entry!(self, chan.get_mut().channel_reestablish(msg), channel_state, chan);
+ if let Some(monitor) = channel_monitor {
+ if let Err(e) = self.monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor) {
+ // channel_reestablish doesn't guarantee that the order of the messages it
+ // returns is sensible, but if we're setting which messages to re-transmit
+ // on monitor update success, we need to make sure the order is sane.
+ if revoke_and_ack.is_none() {
+ order = RAACommitmentOrder::CommitmentFirst;
+ }
+ if commitment_update.is_none() {
+ order = RAACommitmentOrder::RevokeAndACKFirst;
+ }
+ return_monitor_err!(self, e, channel_state, chan, order, revoke_and_ack.is_some(), commitment_update.is_some());
+ //TODO: Resend the funding_locked if needed once we get the monitor running again
+ }
+ }
+ if let Some(msg) = funding_locked {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+ node_id: their_node_id.clone(),
+ msg
+ });
+ }
+ macro_rules! send_raa { () => {
+ if let Some(msg) = revoke_and_ack {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ node_id: their_node_id.clone(),
+ msg
+ });
+ }
+ } }
+ macro_rules! send_cu { () => {
+ if let Some(updates) = commitment_update {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: their_node_id.clone(),
+ updates
+ });
+ }
+ } }
+ match order {
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ send_raa!();
+ send_cu!();
+ },
+ RAACommitmentOrder::CommitmentFirst => {
+ send_cu!();
+ send_raa!();
+ },
+ }
+ if let Some(msg) = shutdown {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: their_node_id.clone(),
+ msg,
+ });
+ }
+ Ok(())
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel", msg.channel_id))
+ }
+ }
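
When a monitor update fails during channel_reestablish, the code above forces the retransmission order to match whichever messages are actually present. A self-contained sketch of that sanitization, with a local Order enum standing in for RAACommitmentOrder:

#[derive(Debug, PartialEq)]
enum Order { RevokeAndACKFirst, CommitmentFirst }

fn sanitize_order(mut order: Order, have_raa: bool, have_cu: bool) -> Order {
    // If one side of the pair is absent, the present message must come first.
    if !have_raa { order = Order::CommitmentFirst; }
    if !have_cu { order = Order::RevokeAndACKFirst; }
    order
}
// e.g. sanitize_order(Order::CommitmentFirst, true, false) == Order::RevokeAndACKFirst
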
+
+ /// Begins the update_fee process. Allowed only on an outbound channel.
+ /// If successful, will generate an UpdateHTLCs event, so you should probably poll
+ /// PeerManager::process_events afterwards.
+ /// Note: This API is likely to change!
+ #[doc(hidden)]
+ pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u64) -> Result<(), APIError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ let their_node_id;
+ let err: Result<(), _> = loop {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+
+ match channel_state.by_id.entry(channel_id) {
+ hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: "Failed to find corresponding channel"}),
+ hash_map::Entry::Occupied(mut chan) => {
+ if !chan.get().is_outbound() {
+ return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel"});
+ }
+ if chan.get().is_awaiting_monitor_update() {
+ return Err(APIError::MonitorUpdateFailed);
+ }
+ if !chan.get().is_live() {
+ return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected"});
+ }
+ their_node_id = chan.get().get_their_node_id();
+ if let Some((update_fee, commitment_signed, chan_monitor)) =
+ break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw), channel_state, chan)
+ {
+ if let Err(_e) = self.monitor.add_update_monitor(chan_monitor.get_funding_txo().unwrap(), chan_monitor) {
+ unimplemented!();
+ }
+ channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get().get_their_node_id(),
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: Some(update_fee),
+ commitment_signed,
+ },
+ });
+ }
+ },
+ }
+ return Ok(())
+ };
+
+ match handle_error!(self, err) {
+ Ok(_) => unreachable!(),
+ Err(e) => {
- if let Some(msgs::ErrorAction::IgnoreError) = e.action {
++ if let msgs::ErrorAction::IgnoreError = e.action {
+ } else {
+ log_error!(self, "Got bad keys: {}!", e.err);
+ let mut channel_state = self.channel_state.lock().unwrap();
+ channel_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: their_node_id,
+ action: e.action,
+ });
+ }
+ Err(APIError::APIMisuseError { err: e.err })
+ },
+ }
+ }
+}
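
A minimal usage sketch for the update_fee API documented above, assuming an already-constructed ChannelManager and the imports shown; bump_feerate is a hypothetical helper, and a real node would let PeerManager::process_events deliver the queued message events instead of draining them by hand:

use lightning::ln::channelmanager::ChannelManager;
use lightning::util::events::MessageSendEventsProvider;

fn bump_feerate(manager: &ChannelManager, channel_id: [u8; 32], feerate_per_kw: u64) {
    match manager.update_fee(channel_id, feerate_per_kw) {
        Ok(()) => {
            // On success an UpdateHTLCs event is now queued for the peer.
            let _msgs = manager.get_and_clear_pending_msg_events();
        },
        Err(_e) => { /* surface the APIError to the caller in real code */ },
    }
}
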
+
+impl events::MessageSendEventsProvider for ChannelManager {
+ fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
+ // TODO: Event release to users and serialization is currently race-y: it's very easy for a
+ // user to serialize a ChannelManager with pending events in it and lose those events on
+ // restart. This is doubly true for the fail/fulfill-backs from monitor events!
+ {
+ //TODO: This behavior should be documented.
+ for htlc_update in self.monitor.fetch_pending_htlc_updated() {
+ if let Some(preimage) = htlc_update.payment_preimage {
+ log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+ self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+ } else {
+ log_trace!(self, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ }
+ }
+ }
+
+ let mut ret = Vec::new();
+ let mut channel_state = self.channel_state.lock().unwrap();
+ mem::swap(&mut ret, &mut channel_state.pending_msg_events);
+ ret
+ }
+}
+
+impl events::EventsProvider for ChannelManager {
+ fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
+ // TODO: Event release to users and serialization is currently race-y: it's very easy for a
+ // user to serialize a ChannelManager with pending events in it and lose those events on
+ // restart. This is doubly true for the fail/fulfill-backs from monitor events!
+ {
+ //TODO: This behavior should be documented.
+ for htlc_update in self.monitor.fetch_pending_htlc_updated() {
+ if let Some(preimage) = htlc_update.payment_preimage {
+ log_trace!(self, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+ self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+ } else {
+ log_trace!(self, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ }
+ }
+ }
+
+ let mut ret = Vec::new();
+ let mut pending_events = self.pending_events.lock().unwrap();
+ mem::swap(&mut ret, &mut *pending_events);
+ ret
+ }
+}
+
+impl ChainListener for ChannelManager {
+ fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], indexes_of_txn_matched: &[u32]) {
+ let header_hash = header.bitcoin_hash();
+ log_trace!(self, "Block {} at height {} connected with {} txn matched", header_hash, height, txn_matched.len());
+ let _ = self.total_consistency_lock.read().unwrap();
+ let mut failed_channels = Vec::new();
+ {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ let short_to_id = channel_state.short_to_id;
+ let pending_msg_events = channel_state.pending_msg_events;
+ channel_state.by_id.retain(|_, channel| {
+ let chan_res = channel.block_connected(header, height, txn_matched, indexes_of_txn_matched);
+ if let Ok(Some(funding_locked)) = chan_res {
+ pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+ node_id: channel.get_their_node_id(),
+ msg: funding_locked,
+ });
+ if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
+ pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: channel.get_their_node_id(),
+ msg: announcement_sigs,
+ });
+ }
+ short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
+ } else if let Err(e) = chan_res {
+ pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: channel.get_their_node_id(),
- action: Some(msgs::ErrorAction::SendErrorMessage { msg: e }),
++ action: msgs::ErrorAction::SendErrorMessage { msg: e },
+ });
+ return false;
+ }
+ if let Some(funding_txo) = channel.get_funding_txo() {
+ for tx in txn_matched {
+ for inp in tx.input.iter() {
+ if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
+ log_trace!(self, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, log_bytes!(channel.channel_id()));
+ if let Some(short_id) = channel.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ // It looks like our counterparty went on-chain. We go ahead and
+ // broadcast our latest local state as well here, just in case it's
+ // some kind of SPV attack, though we expect these to be dropped.
+ failed_channels.push(channel.force_shutdown());
+ if let Ok(update) = self.get_channel_update(&channel) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ return false;
+ }
+ }
+ }
+ }
+ if channel.is_funding_initiated() && channel.channel_monitor().would_broadcast_at_height(height) {
+ if let Some(short_id) = channel.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ failed_channels.push(channel.force_shutdown());
+ // If would_broadcast_at_height() is true, the channel_monitor will broadcast
+ // the latest local tx for us, so we should skip that here (it doesn't really
+ // hurt anything, but does make tests a bit simpler).
+ failed_channels.last_mut().unwrap().0 = Vec::new();
+ if let Ok(update) = self.get_channel_update(&channel) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ return false;
+ }
+ true
+ });
+ }
+ for failure in failed_channels.drain(..) {
+ self.finish_force_close_channel(failure);
+ }
+ self.latest_block_height.store(height as usize, Ordering::Release);
+ *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header_hash;
+ }
+
+ /// We force-close the channel without letting our counterparty participate in the shutdown
+ fn block_disconnected(&self, header: &BlockHeader, _: u32) {
+ let _ = self.total_consistency_lock.read().unwrap();
+ let mut failed_channels = Vec::new();
+ {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_lock.borrow_parts();
+ let short_to_id = channel_state.short_to_id;
+ let pending_msg_events = channel_state.pending_msg_events;
+ channel_state.by_id.retain(|_, v| {
+ if v.block_disconnected(header) {
+ if let Some(short_id) = v.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ failed_channels.push(v.force_shutdown());
+ if let Ok(update) = self.get_channel_update(&v) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ false
+ } else {
+ true
+ }
+ });
+ }
+ for failure in failed_channels.drain(..) {
+ self.finish_force_close_channel(failure);
+ }
+ self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
+ *self.last_block_hash.try_lock().expect("block_(dis)connected must not be called in parallel") = header.bitcoin_hash();
+ }
+}
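
The two ChainListener methods above keep latest_block_height in sync with the chain tip using atomics: a Release store on connect, a fetch_sub on disconnect. A self-contained sketch of just that bookkeeping:

use std::sync::atomic::{AtomicUsize, Ordering};

struct HeightTracker { latest_block_height: AtomicUsize }

impl HeightTracker {
    fn block_connected(&self, height: u32) {
        // Publish the new tip height to other threads.
        self.latest_block_height.store(height as usize, Ordering::Release);
    }
    fn block_disconnected(&self) {
        // A one-block reorg simply steps the height back.
        self.latest_block_height.fetch_sub(1, Ordering::AcqRel);
    }
}
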
+
+impl ChannelMessageHandler for ChannelManager {
+ //TODO: Handle errors and close channel (or so)
- fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), HandleError> {
++ fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::OpenChannel) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_open_channel(their_node_id, their_local_features, msg))
+ }
+
- fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
++ fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &msgs::AcceptChannel) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_accept_channel(their_node_id, their_local_features, msg))
+ }
+
- fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), HandleError> {
++ fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_funding_created(their_node_id, msg))
+ }
+
- fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), HandleError> {
++ fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_funding_signed(their_node_id, msg))
+ }
+
- fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), HandleError> {
++ fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_funding_locked(their_node_id, msg))
+ }
+
- fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), HandleError> {
++ fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_shutdown(their_node_id, msg))
+ }
+
- fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), HandleError> {
++ fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_closing_signed(their_node_id, msg))
+ }
+
- fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), msgs::HandleError> {
++ fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_update_add_htlc(their_node_id, msg))
+ }
+
- fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
++ fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_update_fulfill_htlc(their_node_id, msg))
+ }
+
- fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
++ fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_update_fail_htlc(their_node_id, msg))
+ }
+
- fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
++ fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_update_fail_malformed_htlc(their_node_id, msg))
+ }
+
- fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), HandleError> {
++ fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_commitment_signed(their_node_id, msg))
+ }
+
- fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), HandleError> {
++ fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_revoke_and_ack(their_node_id, msg))
+ }
+
- fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), HandleError> {
++ fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_update_fee(their_node_id, msg))
+ }
+
- fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
++ fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_announcement_signatures(their_node_id, msg))
+ }
+
- fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), HandleError> {
++ fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), LightningError> {
+ let _ = self.total_consistency_lock.read().unwrap();
+ handle_error!(self, self.internal_channel_reestablish(their_node_id, msg))
+ }
+
+ fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool) {
+ let _ = self.total_consistency_lock.read().unwrap();
+ let mut failed_channels = Vec::new();
+ let mut failed_payments = Vec::new();
+ {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+ let short_to_id = channel_state.short_to_id;
+ let pending_msg_events = channel_state.pending_msg_events;
+ if no_connection_possible {
+ log_debug!(self, "Failing all channels with {} due to no_connection_possible", log_pubkey!(their_node_id));
+ channel_state.by_id.retain(|_, chan| {
+ if chan.get_their_node_id() == *their_node_id {
+ if let Some(short_id) = chan.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ failed_channels.push(chan.force_shutdown());
+ if let Ok(update) = self.get_channel_update(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ false
+ } else {
+ true
+ }
+ });
+ } else {
+ log_debug!(self, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(their_node_id));
+ channel_state.by_id.retain(|_, chan| {
+ if chan.get_their_node_id() == *their_node_id {
+ //TODO: mark channel disabled (and maybe announce such after a timeout).
+ let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused();
+ if !failed_adds.is_empty() {
+ let chan_update = self.get_channel_update(&chan).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
+ failed_payments.push((chan_update, failed_adds));
+ }
+ if chan.is_shutdown() {
+ if let Some(short_id) = chan.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ return false;
+ }
+ }
+ true
+ })
+ }
+ pending_msg_events.retain(|msg| {
+ match msg {
+ &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendFundingLocked { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
+ &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+ &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != their_node_id,
+ &events::MessageSendEvent::PaymentFailureNetworkUpdate { .. } => true,
+ }
+ });
+ }
+ for failure in failed_channels.drain(..) {
+ self.finish_force_close_channel(failure);
+ }
+ for (chan_update, mut htlc_sources) in failed_payments {
+ for (htlc_source, payment_hash) in htlc_sources.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() });
+ }
+ }
+ }
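
peer_disconnected prunes queued message events so nothing addressed to the departing peer survives, while broadcast events are kept. A stripped-down sketch of that retain pattern, with a two-variant Event standing in for MessageSendEvent:

enum Event { ToPeer { node_id: u8 }, Broadcast }

fn drop_events_for(events: &mut Vec<Event>, departing: u8) {
    events.retain(|e| match *e {
        Event::ToPeer { node_id } => node_id != departing,
        Event::Broadcast => true,
    });
}
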
+
+ fn peer_connected(&self, their_node_id: &PublicKey) {
+ log_debug!(self, "Generating channel_reestablish events for {}", log_pubkey!(their_node_id));
+
+ let _ = self.total_consistency_lock.read().unwrap();
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = channel_state_lock.borrow_parts();
+ let pending_msg_events = channel_state.pending_msg_events;
+ channel_state.by_id.retain(|_, chan| {
+ if chan.get_their_node_id() == *their_node_id {
+ if !chan.have_received_message() {
+ // If we created this (outbound) channel while we were disconnected from the
+ // peer we probably failed to send the open_channel message, which is now
+ // lost. We can't have had anything pending related to this channel, so we just
+ // drop it.
+ false
+ } else {
+ pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
+ node_id: chan.get_their_node_id(),
+ msg: chan.get_channel_reestablish(),
+ });
+ true
+ }
+ } else { true }
+ });
+ //TODO: Also re-broadcast announcement_signatures
+ }
+
+ fn handle_error(&self, their_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
+ let _ = self.total_consistency_lock.read().unwrap();
+
+ if msg.channel_id == [0; 32] {
+ for chan in self.list_channels() {
+ if chan.remote_network_id == *their_node_id {
+ self.force_close_channel(&chan.channel_id);
+ }
+ }
+ } else {
+ self.force_close_channel(&msg.channel_id);
+ }
+ }
+}
+
+const SERIALIZATION_VERSION: u8 = 1;
+const MIN_SERIALIZATION_VERSION: u8 = 1;
+
+impl Writeable for PendingForwardHTLCInfo {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ self.onion_packet.write(writer)?;
+ self.incoming_shared_secret.write(writer)?;
+ self.payment_hash.write(writer)?;
+ self.short_channel_id.write(writer)?;
+ self.amt_to_forward.write(writer)?;
+ self.outgoing_cltv_value.write(writer)?;
+ Ok(())
+ }
+}
+
+impl<R: ::std::io::Read> Readable<R> for PendingForwardHTLCInfo {
+ fn read(reader: &mut R) -> Result<PendingForwardHTLCInfo, DecodeError> {
+ Ok(PendingForwardHTLCInfo {
+ onion_packet: Readable::read(reader)?,
+ incoming_shared_secret: Readable::read(reader)?,
+ payment_hash: Readable::read(reader)?,
+ short_channel_id: Readable::read(reader)?,
+ amt_to_forward: Readable::read(reader)?,
+ outgoing_cltv_value: Readable::read(reader)?,
+ })
+ }
+}
+
+impl Writeable for HTLCFailureMsg {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
+ &HTLCFailureMsg::Relay(ref fail_msg) => {
+ 0u8.write(writer)?;
+ fail_msg.write(writer)?;
+ },
+ &HTLCFailureMsg::Malformed(ref fail_msg) => {
+ 1u8.write(writer)?;
+ fail_msg.write(writer)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+impl<R: ::std::io::Read> Readable<R> for HTLCFailureMsg {
+ fn read(reader: &mut R) -> Result<HTLCFailureMsg, DecodeError> {
+ match <u8 as Readable<R>>::read(reader)? {
+ 0 => Ok(HTLCFailureMsg::Relay(Readable::read(reader)?)),
+ 1 => Ok(HTLCFailureMsg::Malformed(Readable::read(reader)?)),
+ _ => Err(DecodeError::InvalidValue),
+ }
+ }
+}
+
+impl Writeable for PendingHTLCStatus {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
+ &PendingHTLCStatus::Forward(ref forward_info) => {
+ 0u8.write(writer)?;
+ forward_info.write(writer)?;
+ },
+ &PendingHTLCStatus::Fail(ref fail_msg) => {
+ 1u8.write(writer)?;
+ fail_msg.write(writer)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+impl<R: ::std::io::Read> Readable<R> for PendingHTLCStatus {
+ fn read(reader: &mut R) -> Result<PendingHTLCStatus, DecodeError> {
+ match <u8 as Readable<R>>::read(reader)? {
+ 0 => Ok(PendingHTLCStatus::Forward(Readable::read(reader)?)),
+ 1 => Ok(PendingHTLCStatus::Fail(Readable::read(reader)?)),
+ _ => Err(DecodeError::InvalidValue),
+ }
+ }
+}
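
The Writeable/Readable impls above all share one pattern: a single discriminant byte followed by the variant's fields, with unknown discriminants rejected. A self-contained sketch of the same pattern written against plain std::io instead of the crate's Writer/Reader traits:

use std::io::{self, Read, Write};

enum Status { Forward(u64), Fail(u16) }

fn write_status<W: Write>(s: &Status, w: &mut W) -> io::Result<()> {
    match *s {
        Status::Forward(id) => { w.write_all(&[0u8])?; w.write_all(&id.to_be_bytes()) },
        Status::Fail(code) => { w.write_all(&[1u8])?; w.write_all(&code.to_be_bytes()) },
    }
}

fn read_status<R: Read>(r: &mut R) -> io::Result<Status> {
    let mut tag = [0u8; 1];
    r.read_exact(&mut tag)?;
    match tag[0] {
        0 => { let mut b = [0u8; 8]; r.read_exact(&mut b)?; Ok(Status::Forward(u64::from_be_bytes(b))) },
        1 => { let mut b = [0u8; 2]; r.read_exact(&mut b)?; Ok(Status::Fail(u16::from_be_bytes(b))) },
        _ => Err(io::Error::new(io::ErrorKind::InvalidData, "unknown discriminant")),
    }
}
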
+
+impl_writeable!(HTLCPreviousHopData, 0, {
+ short_channel_id,
+ htlc_id,
+ incoming_packet_shared_secret
+});
+
+impl Writeable for HTLCSource {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
+ &HTLCSource::PreviousHopData(ref hop_data) => {
+ 0u8.write(writer)?;
+ hop_data.write(writer)?;
+ },
+ &HTLCSource::OutboundRoute { ref route, ref session_priv, ref first_hop_htlc_msat } => {
+ 1u8.write(writer)?;
+ route.write(writer)?;
+ session_priv.write(writer)?;
+ first_hop_htlc_msat.write(writer)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+impl<R: ::std::io::Read> Readable<R> for HTLCSource {
+ fn read(reader: &mut R) -> Result<HTLCSource, DecodeError> {
+ match <u8 as Readable<R>>::read(reader)? {
+ 0 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
+ 1 => Ok(HTLCSource::OutboundRoute {
+ route: Readable::read(reader)?,
+ session_priv: Readable::read(reader)?,
+ first_hop_htlc_msat: Readable::read(reader)?,
+ }),
+ _ => Err(DecodeError::InvalidValue),
+ }
+ }
+}
+
+impl Writeable for HTLCFailReason {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
- &HTLCFailReason::ErrorPacket { ref err } => {
++ &HTLCFailReason::LightningError { ref err } => {
+ 0u8.write(writer)?;
+ err.write(writer)?;
+ },
+ &HTLCFailReason::Reason { ref failure_code, ref data } => {
+ 1u8.write(writer)?;
+ failure_code.write(writer)?;
+ data.write(writer)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+impl<R: ::std::io::Read> Readable<R> for HTLCFailReason {
+ fn read(reader: &mut R) -> Result<HTLCFailReason, DecodeError> {
+ match <u8 as Readable<R>>::read(reader)? {
- 0 => Ok(HTLCFailReason::ErrorPacket { err: Readable::read(reader)? }),
++ 0 => Ok(HTLCFailReason::LightningError { err: Readable::read(reader)? }),
+ 1 => Ok(HTLCFailReason::Reason {
+ failure_code: Readable::read(reader)?,
+ data: Readable::read(reader)?,
+ }),
+ _ => Err(DecodeError::InvalidValue),
+ }
+ }
+}
+
+impl Writeable for HTLCForwardInfo {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
+ &HTLCForwardInfo::AddHTLC { ref prev_short_channel_id, ref prev_htlc_id, ref forward_info } => {
+ 0u8.write(writer)?;
+ prev_short_channel_id.write(writer)?;
+ prev_htlc_id.write(writer)?;
+ forward_info.write(writer)?;
+ },
+ &HTLCForwardInfo::FailHTLC { ref htlc_id, ref err_packet } => {
+ 1u8.write(writer)?;
+ htlc_id.write(writer)?;
+ err_packet.write(writer)?;
+ },
+ }
+ Ok(())
+ }
+}
+
+impl<R: ::std::io::Read> Readable<R> for HTLCForwardInfo {
+ fn read(reader: &mut R) -> Result<HTLCForwardInfo, DecodeError> {
+ match <u8 as Readable<R>>::read(reader)? {
+ 0 => Ok(HTLCForwardInfo::AddHTLC {
+ prev_short_channel_id: Readable::read(reader)?,
+ prev_htlc_id: Readable::read(reader)?,
+ forward_info: Readable::read(reader)?,
+ }),
+ 1 => Ok(HTLCForwardInfo::FailHTLC {
+ htlc_id: Readable::read(reader)?,
+ err_packet: Readable::read(reader)?,
+ }),
+ _ => Err(DecodeError::InvalidValue),
+ }
+ }
+}
+
+impl Writeable for ChannelManager {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ let _ = self.total_consistency_lock.write().unwrap();
+
+ writer.write_all(&[SERIALIZATION_VERSION; 1])?;
+ writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
+
+ self.genesis_hash.write(writer)?;
+ (self.latest_block_height.load(Ordering::Acquire) as u32).write(writer)?;
+ self.last_block_hash.lock().unwrap().write(writer)?;
+
+ let channel_state = self.channel_state.lock().unwrap();
+ let mut unfunded_channels = 0;
+ for (_, channel) in channel_state.by_id.iter() {
+ if !channel.is_funding_initiated() {
+ unfunded_channels += 1;
+ }
+ }
+ ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
+ for (_, channel) in channel_state.by_id.iter() {
+ if channel.is_funding_initiated() {
+ channel.write(writer)?;
+ }
+ }
+
+ (channel_state.forward_htlcs.len() as u64).write(writer)?;
+ for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() {
+ short_channel_id.write(writer)?;
+ (pending_forwards.len() as u64).write(writer)?;
+ for forward in pending_forwards {
+ forward.write(writer)?;
+ }
+ }
+
+ (channel_state.claimable_htlcs.len() as u64).write(writer)?;
+ for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
+ payment_hash.write(writer)?;
+ (previous_hops.len() as u64).write(writer)?;
+ for &(recvd_amt, ref previous_hop) in previous_hops.iter() {
+ recvd_amt.write(writer)?;
+ previous_hop.write(writer)?;
+ }
+ }
+
+ Ok(())
+ }
+}
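
The serialization above length-prefixes every map and Vec with a u64 count before writing the elements. A self-contained sketch of that encoding for a forward_htlcs-shaped map, with fixed-width integers standing in for the real value types:

use std::collections::HashMap;

fn write_forward_htlcs(map: &HashMap<u64, Vec<u32>>, out: &mut Vec<u8>) {
    // u64 element count, then each (key, length-prefixed values) pair.
    out.extend_from_slice(&(map.len() as u64).to_be_bytes());
    for (short_channel_id, forwards) in map.iter() {
        out.extend_from_slice(&short_channel_id.to_be_bytes());
        out.extend_from_slice(&(forwards.len() as u64).to_be_bytes());
        for htlc_id in forwards.iter() {
            out.extend_from_slice(&htlc_id.to_be_bytes());
        }
    }
}
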
+
+/// Arguments for the creation of a ChannelManager that are not deserialized.
+///
+/// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
+/// is:
+/// 1) Deserialize all stored ChannelMonitors.
+/// 2) Deserialize the ChannelManager by filling in this struct and calling <(Sha256dHash,
+/// ChannelManager)>::read(reader, args).
+/// This may result in closing some Channels if the ChannelMonitor is newer than the stored
+/// ChannelManager state to ensure no loss of funds. Thus, transactions may be broadcast.
+/// 3) Register all relevant ChannelMonitor outpoints with your chain watch mechanism using
+/// ChannelMonitor::get_monitored_outpoints and ChannelMonitor::get_funding_txo().
+/// 4) Reconnect blocks on your ChannelMonitors.
+/// 5) Move the ChannelMonitors into your local ManyChannelMonitor.
+/// 6) Disconnect/connect blocks on the ChannelManager.
+/// 7) Register the new ChannelManager with your ChainWatchInterface (this does not happen
+/// automatically as it does in ChannelManager::new()).
+pub struct ChannelManagerReadArgs<'a> {
+ /// The keys provider which will give us relevant keys. Some keys will be loaded during
+ /// deserialization.
+ pub keys_manager: Arc<KeysInterface>,
+
+ /// The fee_estimator for use in the ChannelManager in the future.
+ ///
+ /// No calls to the FeeEstimator will be made during deserialization.
+ pub fee_estimator: Arc<FeeEstimator>,
+ /// The ManyChannelMonitor for use in the ChannelManager in the future.
+ ///
+ /// No calls to the ManyChannelMonitor will be made during deserialization. It is assumed that
+ /// you have deserialized ChannelMonitors separately and will add them to your
+ /// ManyChannelMonitor after deserializing this ChannelManager.
+ pub monitor: Arc<ManyChannelMonitor>,
+ /// The ChainWatchInterface for use in the ChannelManager in the future.
+ ///
+ /// No calls to the ChainWatchInterface will be made during deserialization.
+ pub chain_monitor: Arc<ChainWatchInterface>,
+ /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
+ /// used to broadcast the latest local commitment transactions of channels which must be
+ /// force-closed during deserialization.
+ pub tx_broadcaster: Arc<BroadcasterInterface>,
+ /// The Logger for use in the ChannelManager and which may be used to log information during
+ /// deserialization.
+ pub logger: Arc<Logger>,
+ /// Default settings used for new channels. Any existing channels will continue to use the
+ /// runtime settings which were stored when the ChannelManager was serialized.
+ pub default_config: UserConfig,
+
+ /// A map from channel funding outpoints to ChannelMonitors for those channels (ie
+ /// value.get_funding_txo() should be the key).
+ ///
+ /// If a monitor is inconsistent with the channel state during deserialization the channel will
+ /// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
+ /// is true for missing channels as well. If there is a monitor missing for which we find
+ /// channel data Err(DecodeError::InvalidValue) will be returned.
+ ///
+ /// In such cases the latest local transactions will be sent to the tx_broadcaster included in
+ /// this struct.
+ pub channel_monitors: &'a HashMap<OutPoint, &'a ChannelMonitor>,
+}
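
Putting step 2 of the process described in the docs above into code: a hedged sketch, assuming the Arc-wrapped components and the deserialized monitor map already exist (steps 0-1) and that the listed import paths match your crate version; reload_manager is a hypothetical helper:

use std::collections::HashMap;
use std::sync::Arc;

use bitcoin_hashes::sha256d::Hash as Sha256dHash;
use lightning::chain::chaininterface::{BroadcasterInterface, ChainWatchInterface, FeeEstimator};
use lightning::chain::keysinterface::KeysInterface;
use lightning::chain::transaction::OutPoint;
use lightning::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs};
use lightning::ln::channelmonitor::{ChannelMonitor, ManyChannelMonitor};
use lightning::ln::msgs::DecodeError;
use lightning::util::config::UserConfig;
use lightning::util::logger::Logger;
use lightning::util::ser::ReadableArgs;

fn reload_manager<'a, R: ::std::io::Read>(
    reader: &mut R,
    keys_manager: Arc<KeysInterface>,
    fee_estimator: Arc<FeeEstimator>,
    monitor: Arc<ManyChannelMonitor>,
    chain_monitor: Arc<ChainWatchInterface>,
    tx_broadcaster: Arc<BroadcasterInterface>,
    logger: Arc<Logger>,
    default_config: UserConfig,
    channel_monitors: &'a HashMap<OutPoint, &'a ChannelMonitor>,
) -> Result<(Sha256dHash, ChannelManager), DecodeError> {
    let args = ChannelManagerReadArgs {
        keys_manager, fee_estimator, monitor, chain_monitor,
        tx_broadcaster, logger, default_config, channel_monitors,
    };
    // May force-close channels whose monitors are newer than the stored state.
    <(Sha256dHash, ChannelManager)>::read(reader, args)
}
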
+
+impl<'a, R : ::std::io::Read> ReadableArgs<R, ChannelManagerReadArgs<'a>> for (Sha256dHash, ChannelManager) {
+ fn read(reader: &mut R, args: ChannelManagerReadArgs<'a>) -> Result<Self, DecodeError> {
+ let _ver: u8 = Readable::read(reader)?;
+ let min_ver: u8 = Readable::read(reader)?;
+ if min_ver > SERIALIZATION_VERSION {
+ return Err(DecodeError::UnknownVersion);
+ }
+
+ let genesis_hash: Sha256dHash = Readable::read(reader)?;
+ let latest_block_height: u32 = Readable::read(reader)?;
+ let last_block_hash: Sha256dHash = Readable::read(reader)?;
+
+ let mut closed_channels = Vec::new();
+
+ let channel_count: u64 = Readable::read(reader)?;
+ let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ for _ in 0..channel_count {
+ let mut channel: Channel = ReadableArgs::read(reader, args.logger.clone())?;
+ if channel.last_block_connected != last_block_hash {
+ return Err(DecodeError::InvalidValue);
+ }
+
+ let funding_txo = channel.channel_monitor().get_funding_txo().ok_or(DecodeError::InvalidValue)?;
+ funding_txo_set.insert(funding_txo.clone());
+ if let Some(monitor) = args.channel_monitors.get(&funding_txo) {
+ if channel.get_cur_local_commitment_transaction_number() != monitor.get_cur_local_commitment_number() ||
+ channel.get_revoked_remote_commitment_transaction_number() != monitor.get_min_seen_secret() ||
+ channel.get_cur_remote_commitment_transaction_number() != monitor.get_cur_remote_commitment_number() {
+ let mut force_close_res = channel.force_shutdown();
+ force_close_res.0 = monitor.get_latest_local_commitment_txn();
+ closed_channels.push(force_close_res);
+ } else {
+ if let Some(short_channel_id) = channel.get_short_channel_id() {
+ short_to_id.insert(short_channel_id, channel.channel_id());
+ }
+ by_id.insert(channel.channel_id(), channel);
+ }
+ } else {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+
+ for (ref funding_txo, ref monitor) in args.channel_monitors.iter() {
+ if !funding_txo_set.contains(funding_txo) {
+ closed_channels.push((monitor.get_latest_local_commitment_txn(), Vec::new()));
+ }
+ }
+
+ let forward_htlcs_count: u64 = Readable::read(reader)?;
+ let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
+ for _ in 0..forward_htlcs_count {
+ let short_channel_id = Readable::read(reader)?;
+ let pending_forwards_count: u64 = Readable::read(reader)?;
+ let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, 128));
+ for _ in 0..pending_forwards_count {
+ pending_forwards.push(Readable::read(reader)?);
+ }
+ forward_htlcs.insert(short_channel_id, pending_forwards);
+ }
+
+ let claimable_htlcs_count: u64 = Readable::read(reader)?;
+ let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
+ for _ in 0..claimable_htlcs_count {
+ let payment_hash = Readable::read(reader)?;
+ let previous_hops_len: u64 = Readable::read(reader)?;
+ let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, 2));
+ for _ in 0..previous_hops_len {
+ previous_hops.push((Readable::read(reader)?, Readable::read(reader)?));
+ }
+ claimable_htlcs.insert(payment_hash, previous_hops);
+ }
+
+ let channel_manager = ChannelManager {
+ genesis_hash,
+ fee_estimator: args.fee_estimator,
+ monitor: args.monitor,
+ chain_monitor: args.chain_monitor,
+ tx_broadcaster: args.tx_broadcaster,
+
+ latest_block_height: AtomicUsize::new(latest_block_height as usize),
+ last_block_hash: Mutex::new(last_block_hash),
+ secp_ctx: Secp256k1::new(),
+
+ channel_state: Mutex::new(ChannelHolder {
+ by_id,
+ short_to_id,
+ forward_htlcs,
+ claimable_htlcs,
+ pending_msg_events: Vec::new(),
+ }),
+ our_network_key: args.keys_manager.get_node_secret(),
+
+ pending_events: Mutex::new(Vec::new()),
+ total_consistency_lock: RwLock::new(()),
+ keys_manager: args.keys_manager,
+ logger: args.logger,
+ default_configuration: args.default_config,
+ };
+
+ for close_res in closed_channels.drain(..) {
+ channel_manager.finish_force_close_channel(close_res);
+ //TODO: Broadcast channel update for closed channels, but only after we've made a
+ //connection or two.
+ }
+
+ Ok((last_block_hash.clone(), channel_manager))
+ }
+}
--- /dev/null
- let input = TxIn {
- previous_output: BitcoinOutPoint {
- txid: commitment_txid,
- vout: transaction_output_index,
- },
- script_sig: Script::new(),
- sequence: idx as u32, // reset to 0xfffffffd in sign_input
- witness: Vec::new(),
- };
- if htlc.cltv_expiry > height + CLTV_SHARED_CLAIM_BUFFER {
- inputs.push(input);
- inputs_desc.push(if htlc.offered { InputDescriptors::OfferedHTLC } else { InputDescriptors::ReceivedHTLC });
- inputs_info.push((payment_preimage, tx.output[transaction_output_index as usize].value, htlc.cltv_expiry));
- total_value += tx.output[transaction_output_index as usize].value;
- } else {
- let mut single_htlc_tx = Transaction {
- version: 2,
- lock_time: 0,
- input: vec![input],
- output: vec!(TxOut {
- script_pubkey: self.destination_script.clone(),
- value: htlc.amount_msat / 1000,
- }),
+//! The logic to monitor for on-chain transactions and create the relevant claim responses lives
+//! here.
+//!
+//! ChannelMonitor objects are generated by ChannelManager in response to relevant
+//! messages/actions, and MUST be persisted to disk (and, preferably, remotely) before progress can
+//! be made in responding to certain messages, see ManyChannelMonitor for more.
+//!
+//! Note that ChannelMonitors are an important part of the lightning trust model and a copy of the
+//! latest ChannelMonitor must always be actively monitoring for chain updates (and no out-of-date
+//! ChannelMonitors should do so). Thus, if you're building rust-lightning into an HSM or other
+//! security-domain-separated system design, you should consider having multiple paths for
+//! ChannelMonitors to get out of the HSM and onto monitoring devices.
+
+use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::transaction::{TxIn,TxOut,SigHashType,Transaction};
+use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
+use bitcoin::blockdata::script::{Script, Builder};
+use bitcoin::blockdata::opcodes;
+use bitcoin::consensus::encode::{self, Decodable, Encodable};
+use bitcoin::util::hash::BitcoinHash;
+use bitcoin::util::bip143;
+
+use bitcoin_hashes::Hash;
+use bitcoin_hashes::sha256::Hash as Sha256;
+use bitcoin_hashes::hash160::Hash as Hash160;
+use bitcoin_hashes::sha256d::Hash as Sha256dHash;
+
+use secp256k1::{Secp256k1,Signature};
+use secp256k1::key::{SecretKey,PublicKey};
+use secp256k1;
+
+use ln::msgs::DecodeError;
+use ln::chan_utils;
+use ln::chan_utils::HTLCOutputInCommitment;
+use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash};
+use ln::channel::{ACCEPTED_HTLC_SCRIPT_WEIGHT, OFFERED_HTLC_SCRIPT_WEIGHT};
+use chain::chaininterface::{ChainListener, ChainWatchInterface, BroadcasterInterface, FeeEstimator, ConfirmationTarget};
+use chain::transaction::OutPoint;
+use chain::keysinterface::SpendableOutputDescriptor;
+use util::logger::Logger;
+use util::ser::{ReadableArgs, Readable, Writer, Writeable, WriterWriteAdaptor, U48};
+use util::{byte_utils, events};
+
+use std::collections::{HashMap, hash_map};
+use std::sync::{Arc,Mutex};
+use std::{hash,cmp, mem};
+
+/// An error enum representing a failure to persist a channel monitor update.
+#[derive(Clone)]
+pub enum ChannelMonitorUpdateErr {
+ /// Used to indicate a temporary failure (eg connection to a watchtower or remote backup of
+ /// our state failed, but is expected to succeed at some point in the future).
+ ///
+ /// Such a failure will "freeze" a channel, preventing us from revoking old states or
+ /// submitting new commitment transactions to the remote party.
+ /// ChannelManager::test_restore_channel_monitor can be used to retry the update(s) and restore
+ /// the channel to an operational state.
+ ///
+ /// Note that continuing to operate when no copy of the updated ChannelMonitor could be
+ /// persisted is unsafe - if you failed to store the update on your own local disk you should
+ /// instead return PermanentFailure to force closure of the channel ASAP.
+ ///
+ /// Even when a channel has been "frozen" updates to the ChannelMonitor can continue to occur
+ /// (eg if an inbound HTLC which we forwarded was claimed upstream resulting in us attempting
+ /// to claim it on this channel) and those updates must be applied wherever they can be. At
+ /// least one such updated ChannelMonitor must be persisted, otherwise PermanentFailure should
+ /// be returned to get things on-chain ASAP using only the in-memory copy. Obviously updates to
+ /// the channel which would invalidate previous ChannelMonitors are not made when a channel has
+ /// been "frozen".
+ ///
+ /// Note that even if updates made after TemporaryFailure succeed you must still call
+ /// test_restore_channel_monitor to ensure you have the latest monitor and re-enable normal
+ /// channel operation.
+ ///
+ /// For deployments where a copy of ChannelMonitors and other local state are backed up in a
+ /// remote location (with local copies persisted immediately), it is anticipated that all
+ /// updates will return TemporaryFailure until the remote copies could be updated.
+ TemporaryFailure,
+ /// Used to indicate no further channel monitor updates will be allowed (eg we've moved on to a
+ /// different watchtower and cannot update with all watchtowers that were previously informed
+ /// of this channel). This will force-close the channel in question.
+ ///
+ /// Should also be used to indicate a failure to update the local copy of the channel monitor.
+ PermanentFailure,
+}
+
+/// General Err type for ChannelMonitor actions. Generally, this implies that the data provided
+/// is inconsistent with the ChannelMonitor on which it was called. eg for
+/// ChannelMonitor::insert_combine this means you tried to merge two monitors for different
+/// channels or for a channel which was restored from a backup and then generated new commitment
+/// updates.
+/// Contains a human-readable error message.
+#[derive(Debug)]
+pub struct MonitorUpdateError(pub &'static str);
+
+/// Simple structure sent back by ManyChannelMonitor when an HTLC is detected onchain in a
+/// forward channel, carrying the info needed to update the HTLC in the backward channel.
+pub struct HTLCUpdate {
+ pub(super) payment_hash: PaymentHash,
+ pub(super) payment_preimage: Option<PaymentPreimage>,
+ pub(super) source: HTLCSource
+}
+
+/// Simple trait indicating ability to track a set of ChannelMonitors and multiplex events between
+/// them. Generally should be implemented by keeping a local SimpleManyChannelMonitor and passing
+/// events to it, while also taking any add_update_monitor events and passing them to some remote
+/// server(s).
+///
+/// Note that any updates to a channel's monitor *must* be applied to each instance of the
+/// channel's monitor everywhere (including remote watchtowers) *before* this function returns. If
+/// an update occurs and a remote watchtower is left with old state, it may broadcast transactions
+/// which we have revoked, allowing our counterparty to claim all funds in the channel!
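+///
+/// As a rough sketch of such a forwarding implementation (the `MyMonitor` type, its `local`
+/// SimpleManyChannelMonitor field and its `watchtower_client` are assumptions for illustration,
+/// not part of this crate):
+///
+/// ```ignore
+/// impl ManyChannelMonitor for MyMonitor {
+///     fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor)
+///             -> Result<(), ChannelMonitorUpdateErr> {
+///         // Push the update to the watchtower *before* reporting success locally, so no
+///         // watchtower is ever left holding revoked state.
+///         if self.watchtower_client.push_monitor(funding_txo, &monitor).is_err() {
+///             return Err(ChannelMonitorUpdateErr::TemporaryFailure);
+///         }
+///         self.local.add_update_monitor(funding_txo, monitor)
+///     }
+///     fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
+///         self.local.fetch_pending_htlc_updated()
+///     }
+/// }
+/// ```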
+pub trait ManyChannelMonitor: Send + Sync {
+ /// Adds or updates a monitor for the given `funding_txo`.
+ ///
+ /// Implementor must also ensure that the funding_txo outpoint is registered with any relevant
+ /// ChainWatchInterfaces such that the provided monitor receives block_connected callbacks with
+ /// any spends of it.
+ fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr>;
+
+ /// Used by ChannelManager to get the list of HTLCs resolved onchain which need to be failed
+ /// or fulfilled backward.
+ fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate>;
+}
+
+/// A simple implementation of a ManyChannelMonitor and ChainListener. Can be used to create a
+/// watchtower or watch our own channels.
+///
+/// Note that you must provide your own key by which to refer to channels.
+///
+/// If you're accepting remote monitors (ie are implementing a watchtower), you must verify that
+/// users cannot overwrite a given channel by providing a duplicate key. ie you should probably
+/// index by a PublicKey which is required to sign any updates.
+///
+/// If you're using this for local monitoring of your own channels, you probably want to use
+/// `OutPoint` as the key, which will give you a ManyChannelMonitor implementation.
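+///
+/// A minimal local-monitoring setup might look like the following sketch (the chain_watcher,
+/// tx_broadcaster, logger and fee_estimator values are assumed to be built elsewhere):
+///
+/// ```ignore
+/// let monitor = SimpleManyChannelMonitor::<OutPoint>::new(
+///     chain_watcher, tx_broadcaster, logger, fee_estimator);
+/// // new() registers the monitor as a ChainListener on chain_watcher, so block_connected
+/// // callbacks will flow to every ChannelMonitor added via add_update_monitor.
+/// ```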
+pub struct SimpleManyChannelMonitor<Key> {
+ #[cfg(test)] // Used in ChannelManager tests to manipulate channels directly
+ pub monitors: Mutex<HashMap<Key, ChannelMonitor>>,
+ #[cfg(not(test))]
+ monitors: Mutex<HashMap<Key, ChannelMonitor>>,
+ chain_monitor: Arc<ChainWatchInterface>,
+ broadcaster: Arc<BroadcasterInterface>,
+ pending_events: Mutex<Vec<events::Event>>,
+ pending_htlc_updated: Mutex<HashMap<PaymentHash, Vec<(HTLCSource, Option<PaymentPreimage>)>>>,
+ logger: Arc<Logger>,
+ fee_estimator: Arc<FeeEstimator>
+}
+
+impl<Key : Send + cmp::Eq + hash::Hash> ChainListener for SimpleManyChannelMonitor<Key> {
+ fn block_connected(&self, header: &BlockHeader, height: u32, txn_matched: &[&Transaction], _indexes_of_txn_matched: &[u32]) {
+ let block_hash = header.bitcoin_hash();
+ let mut new_events: Vec<events::Event> = Vec::with_capacity(0);
+ let mut htlc_updated_infos = Vec::new();
+ {
+ let mut monitors = self.monitors.lock().unwrap();
+ for monitor in monitors.values_mut() {
+ let (txn_outputs, spendable_outputs, mut htlc_updated) = monitor.block_connected(txn_matched, height, &block_hash, &*self.broadcaster, &*self.fee_estimator);
+ if spendable_outputs.len() > 0 {
+ new_events.push(events::Event::SpendableOutputs {
+ outputs: spendable_outputs,
+ });
+ }
+
+ for (ref txid, ref outputs) in txn_outputs {
+ for (idx, output) in outputs.iter().enumerate() {
+ self.chain_monitor.install_watch_outpoint((txid.clone(), idx as u32), &output.script_pubkey);
+ }
+ }
+ htlc_updated_infos.append(&mut htlc_updated);
+ }
+ }
+ {
+ // ChannelManager will just need to fetch pending_htlc_updated and pass state backward
+ let mut pending_htlc_updated = self.pending_htlc_updated.lock().unwrap();
+ for htlc in htlc_updated_infos.drain(..) {
+ match pending_htlc_updated.entry(htlc.2) {
+ hash_map::Entry::Occupied(mut e) => {
+ // In case of reorg we may have htlc outputs solved in a different way so
+ // we prefer to keep claims but don't store duplicate updates for a given
+ // (payment_hash, HTLCSource) pair.
+ let mut existing_claim = false;
+ e.get_mut().retain(|htlc_data| {
+ if htlc.0 == htlc_data.0 {
+ if htlc_data.1.is_some() {
+ existing_claim = true;
+ true
+ } else { false }
+ } else { true }
+ });
+ if !existing_claim {
+ e.get_mut().push((htlc.0, htlc.1));
+ }
+ }
+ hash_map::Entry::Vacant(e) => {
+ e.insert(vec![(htlc.0, htlc.1)]);
+ }
+ }
+ }
+ }
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.append(&mut new_events);
+ }
+
+ fn block_disconnected(&self, header: &BlockHeader, disconnected_height: u32) {
+ let block_hash = header.bitcoin_hash();
+ let mut monitors = self.monitors.lock().unwrap();
+ for monitor in monitors.values_mut() {
+ monitor.block_disconnected(disconnected_height, &block_hash);
+ }
+ }
+}
+
+impl<Key : Send + cmp::Eq + hash::Hash + 'static> SimpleManyChannelMonitor<Key> {
+ /// Creates a new object which can be used to monitor several channels given the chain
+ /// interface with which to register to receive notifications.
+ pub fn new(chain_monitor: Arc<ChainWatchInterface>, broadcaster: Arc<BroadcasterInterface>, logger: Arc<Logger>, feeest: Arc<FeeEstimator>) -> Arc<SimpleManyChannelMonitor<Key>> {
+ let res = Arc::new(SimpleManyChannelMonitor {
+ monitors: Mutex::new(HashMap::new()),
+ chain_monitor,
+ broadcaster,
+ pending_events: Mutex::new(Vec::new()),
+ pending_htlc_updated: Mutex::new(HashMap::new()),
+ logger,
+ fee_estimator: feeest,
+ });
+ let weak_res = Arc::downgrade(&res);
+ res.chain_monitor.register_listener(weak_res);
+ res
+ }
+
+ /// Adds or updates the monitor which monitors the channel referred to by the given key.
+ pub fn add_update_monitor_by_key(&self, key: Key, monitor: ChannelMonitor) -> Result<(), MonitorUpdateError> {
+ let mut monitors = self.monitors.lock().unwrap();
+ match monitors.get_mut(&key) {
+ Some(orig_monitor) => {
+ log_trace!(self, "Updating Channel Monitor for channel {}", log_funding_info!(monitor.key_storage));
+ return orig_monitor.insert_combine(monitor);
+ },
+ None => {}
+ };
+ match monitor.key_storage {
+ Storage::Local { ref funding_info, .. } => {
+ match funding_info {
+ &None => {
+ return Err(MonitorUpdateError("Try to update a useless monitor without funding_txo !"));
+ },
+ &Some((ref outpoint, ref script)) => {
+ log_trace!(self, "Got new Channel Monitor for channel {}", log_bytes!(outpoint.to_channel_id()[..]));
+ self.chain_monitor.install_watch_tx(&outpoint.txid, script);
+ self.chain_monitor.install_watch_outpoint((outpoint.txid, outpoint.index as u32), script);
+ },
+ }
+ },
+ Storage::Watchtower { .. } => {
+ self.chain_monitor.watch_all_txn();
+ }
+ }
+ monitors.insert(key, monitor);
+ Ok(())
+ }
+}
+
+impl ManyChannelMonitor for SimpleManyChannelMonitor<OutPoint> {
+ fn add_update_monitor(&self, funding_txo: OutPoint, monitor: ChannelMonitor) -> Result<(), ChannelMonitorUpdateErr> {
+ match self.add_update_monitor_by_key(funding_txo, monitor) {
+ Ok(_) => Ok(()),
+ Err(_) => Err(ChannelMonitorUpdateErr::PermanentFailure),
+ }
+ }
+
+ fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
+ let mut updated = self.pending_htlc_updated.lock().unwrap();
+ let mut pending_htlcs_updated = Vec::with_capacity(updated.len());
+ for (k, v) in updated.drain() {
+ for htlc_data in v {
+ pending_htlcs_updated.push(HTLCUpdate {
+ payment_hash: k,
+ payment_preimage: htlc_data.1,
+ source: htlc_data.0,
+ });
+ }
+ }
+ pending_htlcs_updated
+ }
+}
+
+impl<Key : Send + cmp::Eq + hash::Hash> events::EventsProvider for SimpleManyChannelMonitor<Key> {
+ fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ let mut ret = Vec::new();
+ mem::swap(&mut ret, &mut *pending_events);
+ ret
+ }
+}
+
+/// If an HTLC expires within this many blocks, don't try to claim it in a shared transaction,
+/// instead claiming it in its own individual transaction.
+const CLTV_SHARED_CLAIM_BUFFER: u32 = 12;
+/// If an HTLC expires within this many blocks, force-close the channel to broadcast the
+/// HTLC-Success transaction.
+/// In other words, this is an upper bound on how many blocks we think it can take us to get a
+/// transaction confirmed (and we use it in a few more, equivalent, places).
+pub(crate) const CLTV_CLAIM_BUFFER: u32 = 6;
+/// Number of blocks by which point we expect our counterparty to have seen new blocks on the
+/// network and done a full update_fail_htlc/commitment_signed dance (+ we've updated all our
+/// copies of ChannelMonitors, including watchtowers). We could enforce the contract by failing
+/// at exactly the CLTV expiration height, but giving our peer a grace period may be profitable
+/// for us if they can still provide a late preimage. Nevertheless, the grace period has to be
+/// accounted for in our CLTV_EXPIRY_DELTA to remain secure. Under this policy we may decrease
+/// the rate of channel failures due to expiration, but we increase the cost of funds being
+/// locked up longer in case of failure.
+/// This delay also covers a low-power peer being slow to process blocks and thus being behind us
+/// on the current block height.
+/// When an onchain failure is to be passed backward we may see the last block of ANTI_REORG_DELAY
+/// with at worst this delay, so this value is not only a mercy for our peer but also a safeguard
+/// giving ourselves enough time to act.
+pub(crate) const LATENCY_GRACE_PERIOD_BLOCKS: u32 = 3;
+/// Number of blocks we wait after seeing an HTLC output being solved before we fail the
+/// corresponding inbound HTLCs. This prevents us from failing backwards and then getting a reorg
+/// resulting in us losing money.
+/// We also use this delay to be sure we can remove our in-flight claim txn from the bump
+/// candidates buffer. It may cause spurious generation of bumped claim txn, but that's all right
+/// given the outpoint is already solved by a previous claim tx. What we want to avoid is a reorg
+/// evicting our claim tx and us then failing to keep bumping another claim tx to solve the
+/// outpoint.
+pub(crate) const ANTI_REORG_DELAY: u32 = 6;
+
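+/// Key material backing a monitor: Local copies hold the secret base keys needed to sign claim
+/// transactions, while Watchtower copies hold only the public base keys needed to watch.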
+#[derive(Clone, PartialEq)]
+enum Storage {
+ Local {
+ revocation_base_key: SecretKey,
+ htlc_base_key: SecretKey,
+ delayed_payment_base_key: SecretKey,
+ payment_base_key: SecretKey,
+ shutdown_pubkey: PublicKey,
+ prev_latest_per_commitment_point: Option<PublicKey>,
+ latest_per_commitment_point: Option<PublicKey>,
+ funding_info: Option<(OutPoint, Script)>,
+ current_remote_commitment_txid: Option<Sha256dHash>,
+ prev_remote_commitment_txid: Option<Sha256dHash>,
+ },
+ Watchtower {
+ revocation_base_key: PublicKey,
+ htlc_base_key: PublicKey,
+ }
+}
+
+#[derive(Clone, PartialEq)]
+struct LocalSignedTx {
+ /// txid of the transaction in tx, just used to make comparison faster
+ txid: Sha256dHash,
+ tx: Transaction,
+ revocation_key: PublicKey,
+ a_htlc_key: PublicKey,
+ b_htlc_key: PublicKey,
+ delayed_payment_key: PublicKey,
+ feerate_per_kw: u64,
+ htlc_outputs: Vec<(HTLCOutputInCommitment, Option<(Signature, Signature)>, Option<HTLCSource>)>,
+}
+
+#[derive(PartialEq)]
+enum InputDescriptors {
+ RevokedOfferedHTLC,
+ RevokedReceivedHTLC,
+ OfferedHTLC,
+ ReceivedHTLC,
+ RevokedOutput, // either a revoked to_local output on commitment tx, a revoked HTLC-Timeout output or a revoked HTLC-Success output
+}
+
+/// When ChannelMonitor discovers that an onchain outpoint is part of a channel and that it needs
+/// to generate a tx to push the channel state forward, we cache the outpoint-solving tx material
+/// so we can build a new, fee-bumped version in case of a lengthy confirmation delay.
+#[derive(Clone, PartialEq)]
+enum TxMaterial {
+ Revoked {
+ script: Script,
+ pubkey: Option<PublicKey>,
+ key: SecretKey,
+ is_htlc: bool,
+ amount: u64,
+ },
+ RemoteHTLC {
+ script: Script,
+ key: SecretKey,
+ preimage: Option<PaymentPreimage>,
+ amount: u64,
+ },
+ LocalHTLC {
+ script: Script,
+ sigs: (Signature, Signature),
+ preimage: Option<PaymentPreimage>,
+ amount: u64,
+ }
+}
+
+/// Upon ChannelMonitor's discovery of some classes of onchain tx, we may have to take actions on
+/// them once they mature to enough confirmations (ANTI_REORG_DELAY).
+#[derive(Clone, PartialEq)]
+enum OnchainEvent {
+ /// Outpoint under claim by one of our own txn; once that tx gets enough confirmations, we
+ /// remove the outpoint from the bump-txn candidate buffer.
+ Claim {
+ outpoint: BitcoinOutPoint,
+ },
+ /// HTLC output getting solved by a timeout; at maturation we pass the upstream payment source
+ /// information along so the inbound HTLC on the backward channel can be failed. Note that in
+ /// the preimage case we pass the info upstream without delay, as we can only win from it, so
+ /// it is never an OnchainEvent.
+ HTLCUpdate {
+ htlc_update: (HTLCSource, PaymentHash),
+ },
+}
+
+const SERIALIZATION_VERSION: u8 = 1;
+const MIN_SERIALIZATION_VERSION: u8 = 1;
+
+/// A ChannelMonitor handles chain events (blocks connected and disconnected) and generates
+/// on-chain transactions to ensure no loss of funds occurs.
+///
+/// You MUST ensure that no ChannelMonitors for a given channel anywhere contain out-of-date
+/// information and are actively monitoring the chain.
+#[derive(Clone)]
+pub struct ChannelMonitor {
+ commitment_transaction_number_obscure_factor: u64,
+
+ key_storage: Storage,
+ their_htlc_base_key: Option<PublicKey>,
+ their_delayed_payment_base_key: Option<PublicKey>,
+ // first is the idx of the first of the two revocation points
+ their_cur_revocation_points: Option<(u64, PublicKey, Option<PublicKey>)>,
+
+ our_to_self_delay: u16,
+ their_to_self_delay: Option<u16>,
+
+ old_secrets: [([u8; 32], u64); 49],
+ remote_claimable_outpoints: HashMap<Sha256dHash, Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>>,
+ /// We cannot identify HTLC-Success or HTLC-Timeout transactions by themselves on the chain.
+ /// Nor can we figure out their commitment numbers without the commitment transaction they are
+ /// spending. Thus, in order to claim them via revocation key, we track all the remote
+ /// commitment transactions which we find on-chain, mapping them to the commitment number which
+ /// can be used to derive the revocation key and claim the transactions.
+ remote_commitment_txn_on_chain: HashMap<Sha256dHash, (u64, Vec<Script>)>,
+ /// Cache used to make pruning of payment_preimages faster.
+ /// Maps payment_hash values to commitment numbers for remote transactions for non-revoked
+ /// remote transactions (ie should remain pretty small).
+ /// Serialized to disk but should generally not be sent to Watchtowers.
+ remote_hash_commitment_number: HashMap<PaymentHash, u64>,
+
+ // We store two local commitment transactions to avoid any race conditions where we may update
+ // some monitors (potentially on watchtowers) but then fail to update others, resulting in the
+ // various monitors for one channel being out of sync, and us broadcasting a local
+ // transaction for which we have deleted claim information on some watchtowers.
+ prev_local_signed_commitment_tx: Option<LocalSignedTx>,
+ current_local_signed_commitment_tx: Option<LocalSignedTx>,
+
+ // Used just for ChannelManager to make sure it has the latest channel data during
+ // deserialization
+ current_remote_commitment_number: u64,
+
+ payment_preimages: HashMap<PaymentHash, PaymentPreimage>,
+
+ destination_script: Script,
+ // Thanks to data loss protection, we may be able to claim our non-HTLC funds back: this is
+ // the script we have to spend from, but we need to scan every commitment transaction for it.
+ to_remote_rescue: Option<(Script, SecretKey)>,
+
+ // Used to track outpoints in the process of being claimed by our transactions. We need to
+ // scan all transactions for inputs spending these. If the height timer (u32) has expired and
+ // the claim tx hasn't reached enough confirmations yet, we use the TxMaterial to regenerate a
+ // new claim tx with a higher satoshis-per-1000-weight-units feerate than the last one (u64);
+ // if the timelock expiration (u32) is near, we decrease the height timer, ie the delay
+ // between bumps. The last cached field (u32) is the height at which the outpoint confirmed,
+ // needed to flush this tracker on reorgs: given the block timers are scaled on timer
+ // expiration, we can't deduce the original height from them.
+ our_claim_txn_waiting_first_conf: HashMap<BitcoinOutPoint, (u32, TxMaterial, u64, u32, u32)>,
+
+ // Used to track onchain events, ie transactions which are part of channels confirmed
+ // onchain, on which we have to take actions once they reach enough confirmations. The key is
+ // a block height timer, ie we enforce the actions when we receive a block at the given
+ // height. Which actions we take depends on the OnchainEvent type.
+ onchain_events_waiting_threshold_conf: HashMap<u32, Vec<OnchainEvent>>,
+
+ // We simply modify last_block_hash in Channel's block_connected so that serialization is
+ // consistent but hopefully the users' copy handles block_connected in a consistent way.
+ // (we do *not*, however, update it in insert_combine, so as to ensure any local user copies
+ // keep their last_block_hash from their own state rather than from updated copies that
+ // didn't run through the full block_connected).
+ pub(crate) last_block_hash: Sha256dHash,
+ secp_ctx: Secp256k1<secp256k1::All>, //TODO: dedup this a bit...
+ logger: Arc<Logger>,
+}
+
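+// Subtracts a HighPriority fee from $value, falling back to Normal and then Background feerates
+// when a higher-priority fee would eat the entire claimable value; evaluates to false (after
+// logging an error) if even the Background fee exceeds $value.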
+macro_rules! subtract_high_prio_fee {
+ ($self: ident, $fee_estimator: expr, $value: expr, $predicted_weight: expr, $spent_txid: expr, $used_feerate: expr) => {
+ {
+ $used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority);
+ let mut fee = $used_feerate * ($predicted_weight as u64) / 1000;
+ if $value <= fee {
+ $used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+ fee = $used_feerate * ($predicted_weight as u64) / 1000;
+ if $value <= fee {
+ $used_feerate = $fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
+ fee = $used_feerate * ($predicted_weight as u64) / 1000;
+ if $value <= fee {
+ log_error!($self, "Failed to generate an on-chain punishment tx spending {} as even low priority fee ({} sat) was more than the entire claim balance ({} sat)",
+ $spent_txid, fee, $value);
+ false
+ } else {
+ log_warn!($self, "Used low priority fee for on-chain punishment tx spending {} as high priority fee was more than the entire claim balance ({} sat)",
+ $spent_txid, $value);
+ $value -= fee;
+ true
+ }
+ } else {
+ log_warn!($self, "Used medium priority fee for on-chain punishment tx spending {} as high priority fee was more than the entire claim balance ({} sat)",
+ $spent_txid, $value);
+ $value -= fee;
+ true
+ }
+ } else {
+ $value -= fee;
+ true
+ }
+ }
+ }
+}
+
+#[cfg(any(test, feature = "fuzztarget"))]
+/// Used only in testing and fuzztarget to check serialization roundtrips don't change the
+/// underlying object
+impl PartialEq for ChannelMonitor {
+ fn eq(&self, other: &Self) -> bool {
+ if self.commitment_transaction_number_obscure_factor != other.commitment_transaction_number_obscure_factor ||
+ self.key_storage != other.key_storage ||
+ self.their_htlc_base_key != other.their_htlc_base_key ||
+ self.their_delayed_payment_base_key != other.their_delayed_payment_base_key ||
+ self.their_cur_revocation_points != other.their_cur_revocation_points ||
+ self.our_to_self_delay != other.our_to_self_delay ||
+ self.their_to_self_delay != other.their_to_self_delay ||
+ self.remote_claimable_outpoints != other.remote_claimable_outpoints ||
+ self.remote_commitment_txn_on_chain != other.remote_commitment_txn_on_chain ||
+ self.remote_hash_commitment_number != other.remote_hash_commitment_number ||
+ self.prev_local_signed_commitment_tx != other.prev_local_signed_commitment_tx ||
+ self.current_remote_commitment_number != other.current_remote_commitment_number ||
+ self.current_local_signed_commitment_tx != other.current_local_signed_commitment_tx ||
+ self.payment_preimages != other.payment_preimages ||
+ self.destination_script != other.destination_script ||
+ self.to_remote_rescue != other.to_remote_rescue ||
+ self.our_claim_txn_waiting_first_conf != other.our_claim_txn_waiting_first_conf ||
+ self.onchain_events_waiting_threshold_conf != other.onchain_events_waiting_threshold_conf
+ {
+ false
+ } else {
+ for (&(ref secret, ref idx), &(ref o_secret, ref o_idx)) in self.old_secrets.iter().zip(other.old_secrets.iter()) {
+ if secret != o_secret || idx != o_idx {
+ return false
+ }
+ }
+ true
+ }
+ }
+}
+
+impl ChannelMonitor {
+ pub(super) fn new(revocation_base_key: &SecretKey, delayed_payment_base_key: &SecretKey, htlc_base_key: &SecretKey, payment_base_key: &SecretKey, shutdown_pubkey: &PublicKey, our_to_self_delay: u16, destination_script: Script, logger: Arc<Logger>) -> ChannelMonitor {
+ ChannelMonitor {
+ commitment_transaction_number_obscure_factor: 0,
+
+ key_storage: Storage::Local {
+ revocation_base_key: revocation_base_key.clone(),
+ htlc_base_key: htlc_base_key.clone(),
+ delayed_payment_base_key: delayed_payment_base_key.clone(),
+ payment_base_key: payment_base_key.clone(),
+ shutdown_pubkey: shutdown_pubkey.clone(),
+ prev_latest_per_commitment_point: None,
+ latest_per_commitment_point: None,
+ funding_info: None,
+ current_remote_commitment_txid: None,
+ prev_remote_commitment_txid: None,
+ },
+ their_htlc_base_key: None,
+ their_delayed_payment_base_key: None,
+ their_cur_revocation_points: None,
+
+ our_to_self_delay: our_to_self_delay,
+ their_to_self_delay: None,
+
+ old_secrets: [([0; 32], 1 << 48); 49],
+ remote_claimable_outpoints: HashMap::new(),
+ remote_commitment_txn_on_chain: HashMap::new(),
+ remote_hash_commitment_number: HashMap::new(),
+
+ prev_local_signed_commitment_tx: None,
+ current_local_signed_commitment_tx: None,
+ current_remote_commitment_number: 1 << 48,
+
+ payment_preimages: HashMap::new(),
+ destination_script: destination_script,
+ to_remote_rescue: None,
+
+ our_claim_txn_waiting_first_conf: HashMap::new(),
+
+ onchain_events_waiting_threshold_conf: HashMap::new(),
+
+ last_block_hash: Default::default(),
+ secp_ctx: Secp256k1::new(),
+ logger,
+ }
+ }
+
+ fn get_witnesses_weight(inputs: &[InputDescriptors]) -> usize {
+ let mut tx_weight = 2; // count segwit flags
+ for inp in inputs {
+ // We use expected weight (and not actual) as signatures and time lock delays may vary
+ tx_weight += match inp {
+ // number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
+ &InputDescriptors::RevokedOfferedHTLC => {
+ 1 + 1 + 73 + 1 + 33 + 1 + 133
+ },
+ // number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
+ &InputDescriptors::RevokedReceivedHTLC => {
+ 1 + 1 + 73 + 1 + 33 + 1 + 139
+ },
+ // number_of_witness_elements + sig_length + remotehtlc_sig + preimage_length + preimage + witness_script_length + witness_script
+ &InputDescriptors::OfferedHTLC => {
+ 1 + 1 + 73 + 1 + 32 + 1 + 133
+ },
+ // number_of_witness_elements + sig_length + revocation_sig + pubkey_length + revocationpubkey + witness_script_length + witness_script
+ &InputDescriptors::ReceivedHTLC => {
+ 1 + 1 + 73 + 1 + 1 + 1 + 139
+ },
+ // number_of_witness_elements + sig_length + revocation_sig + true_length + op_true + witness_script_length + witness_script
+ &InputDescriptors::RevokedOutput => {
+ 1 + 1 + 73 + 1 + 1 + 1 + 77
+ },
+ };
+ }
+ tx_weight
+ }
+
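+ // Height at which we should next (re-)attempt to get a claim tx confirmed: every block once
+ // the timelock is within 3 blocks of expiring, every 3 blocks when within 15, and every 15
+ // blocks otherwise.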
+ fn get_height_timer(current_height: u32, timelock_expiration: u32) -> u32 {
+ if timelock_expiration <= current_height || timelock_expiration - current_height <= 3 {
+ return current_height + 1
+ } else if timelock_expiration - current_height <= 15 {
+ return current_height + 3
+ }
+ current_height + 15
+ }
+
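+ // place_secret/derive_secret implement the BOLT 3 "shachain" scheme: place_secret returns
+ // the position of the lowest set bit in idx (ie which old_secrets bucket the secret for idx
+ // belongs in, or 48 for idx 0), and derive_secret walks from a stored bucket secret down to
+ // the requested index by flipping each set bit (high to low) and hashing.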
+ #[inline]
+ fn place_secret(idx: u64) -> u8 {
+ for i in 0..48 {
+ if idx & (1 << i) == (1 << i) {
+ return i
+ }
+ }
+ 48
+ }
+
+ #[inline]
+ fn derive_secret(secret: [u8; 32], bits: u8, idx: u64) -> [u8; 32] {
+ let mut res: [u8; 32] = secret;
+ for i in 0..bits {
+ let bitpos = bits - 1 - i;
+ if idx & (1 << bitpos) == (1 << bitpos) {
+ res[(bitpos / 8) as usize] ^= 1 << (bitpos & 7);
+ res = Sha256::hash(&res).into_inner();
+ }
+ }
+ res
+ }
+
+ /// Inserts a revocation secret into this channel monitor. Prunes old payment preimages which
+ /// are needed neither by local commitment transactions' HTLCs nor by remote ones. Once we
+ /// have seen a remote commitment transaction's secret, its preimages are de facto pruned (we
+ /// can use the revocation key instead).
+ pub(super) fn provide_secret(&mut self, idx: u64, secret: [u8; 32]) -> Result<(), MonitorUpdateError> {
+ let pos = ChannelMonitor::place_secret(idx);
+ for i in 0..pos {
+ let (old_secret, old_idx) = self.old_secrets[i as usize];
+ if ChannelMonitor::derive_secret(secret, pos, old_idx) != old_secret {
+ return Err(MonitorUpdateError("Previous secret did not match new one"));
+ }
+ }
+ if self.get_min_seen_secret() <= idx {
+ return Ok(());
+ }
+ self.old_secrets[pos as usize] = (secret, idx);
+
+ // Prune HTLCs from the previous remote commitment tx so we don't generate failure/fulfill
+ // events for now-revoked/fulfilled HTLCs.
+ // TODO: We should probably consider whether we're really getting the next secret here.
+ if let Storage::Local { ref mut prev_remote_commitment_txid, .. } = self.key_storage {
+ if let Some(txid) = prev_remote_commitment_txid.take() {
+ for &mut (_, ref mut source) in self.remote_claimable_outpoints.get_mut(&txid).unwrap() {
+ *source = None;
+ }
+ }
+ }
+
+ if !self.payment_preimages.is_empty() {
+ let local_signed_commitment_tx = self.current_local_signed_commitment_tx.as_ref().expect("Channel needs at least an initial commitment tx !");
+ let prev_local_signed_commitment_tx = self.prev_local_signed_commitment_tx.as_ref();
+ let min_idx = self.get_min_seen_secret();
+ let remote_hash_commitment_number = &mut self.remote_hash_commitment_number;
+
+ self.payment_preimages.retain(|&k, _| {
+ for &(ref htlc, _, _) in &local_signed_commitment_tx.htlc_outputs {
+ if k == htlc.payment_hash {
+ return true
+ }
+ }
+ if let Some(prev_local_commitment_tx) = prev_local_signed_commitment_tx {
+ for &(ref htlc, _, _) in prev_local_commitment_tx.htlc_outputs.iter() {
+ if k == htlc.payment_hash {
+ return true
+ }
+ }
+ }
+ let contains = if let Some(cn) = remote_hash_commitment_number.get(&k) {
+ if *cn < min_idx {
+ return true
+ }
+ true
+ } else { false };
+ if contains {
+ remote_hash_commitment_number.remove(&k);
+ }
+ false
+ });
+ }
+
+ Ok(())
+ }
+
+ /// Informs this monitor of the latest remote (ie non-broadcastable) commitment transaction.
+ /// The monitor watches for it to be broadcasted and then uses the HTLC information (and
+ /// possibly future revocation/preimage information) to claim outputs where possible.
+ /// We also cache the payment_hash-to-commitment-number mapping, to lighten the pruning of old
+ /// preimages by watchtowers.
+ pub(super) fn provide_latest_remote_commitment_tx_info(&mut self, unsigned_commitment_tx: &Transaction, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)>, commitment_number: u64, their_revocation_point: PublicKey) {
+ // TODO: Encrypt the htlc_outputs data with the single-hash of the commitment transaction
+ // so that a remote monitor doesn't learn anything unless there is a malicious close.
+ // (only maybe, sadly we can't do the same for local info, as we need to be aware of
+ // timeouts)
+ for &(ref htlc, _) in &htlc_outputs {
+ self.remote_hash_commitment_number.insert(htlc.payment_hash, commitment_number);
+ }
+
+ let new_txid = unsigned_commitment_tx.txid();
+ log_trace!(self, "Tracking new remote commitment transaction with txid {} at commitment number {} with {} HTLC outputs", new_txid, commitment_number, htlc_outputs.len());
+ log_trace!(self, "New potential remote commitment transaction: {}", encode::serialize_hex(unsigned_commitment_tx));
+ if let Storage::Local { ref mut current_remote_commitment_txid, ref mut prev_remote_commitment_txid, .. } = self.key_storage {
+ *prev_remote_commitment_txid = current_remote_commitment_txid.take();
+ *current_remote_commitment_txid = Some(new_txid);
+ }
+ self.remote_claimable_outpoints.insert(new_txid, htlc_outputs);
+ self.current_remote_commitment_number = commitment_number;
+ //TODO: Merge this into the other per-remote-transaction output storage stuff
+ match self.their_cur_revocation_points {
+ Some(old_points) => {
+ if old_points.0 == commitment_number + 1 {
+ self.their_cur_revocation_points = Some((old_points.0, old_points.1, Some(their_revocation_point)));
+ } else if old_points.0 == commitment_number + 2 {
+ if let Some(old_second_point) = old_points.2 {
+ self.their_cur_revocation_points = Some((old_points.0 - 1, old_second_point, Some(their_revocation_point)));
+ } else {
+ self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+ }
+ } else {
+ self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+ }
+ },
+ None => {
+ self.their_cur_revocation_points = Some((commitment_number, their_revocation_point, None));
+ }
+ }
+ }
+
+ pub(super) fn provide_rescue_remote_commitment_tx_info(&mut self, their_revocation_point: PublicKey) {
+ match self.key_storage {
+ Storage::Local { ref payment_base_key, .. } => {
+ if let Ok(payment_key) = chan_utils::derive_public_key(&self.secp_ctx, &their_revocation_point, &PublicKey::from_secret_key(&self.secp_ctx, &payment_base_key)) {
+ let to_remote_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0)
+ .push_slice(&Hash160::hash(&payment_key.serialize())[..])
+ .into_script();
+ if let Ok(to_remote_key) = chan_utils::derive_private_key(&self.secp_ctx, &their_revocation_point, &payment_base_key) {
+ self.to_remote_rescue = Some((to_remote_script, to_remote_key));
+ }
+ }
+ },
+ Storage::Watchtower { .. } => {}
+ }
+ }
+
+ /// Informs this monitor of the latest local (ie broadcastable) commitment transaction. The
+ /// monitor watches for timeouts and may broadcast it if we approach such a timeout. Thus, it
+ /// is important that any clones of this channel monitor (including remote clones) be kept
+ /// up-to-date as our local commitment transaction is updated.
+ /// Panics if set_their_to_self_delay has never been called.
+ /// Also updates Storage with the latest local per_commitment_point, used to derive the
+ /// local_delayedkey in case of an onchain HTLC tx.
+ pub(super) fn provide_latest_local_commitment_tx_info(&mut self, signed_commitment_tx: Transaction, local_keys: chan_utils::TxCreationKeys, feerate_per_kw: u64, htlc_outputs: Vec<(HTLCOutputInCommitment, Option<(Signature, Signature)>, Option<HTLCSource>)>) {
+ assert!(self.their_to_self_delay.is_some());
+ self.prev_local_signed_commitment_tx = self.current_local_signed_commitment_tx.take();
+ self.current_local_signed_commitment_tx = Some(LocalSignedTx {
+ txid: signed_commitment_tx.txid(),
+ tx: signed_commitment_tx,
+ revocation_key: local_keys.revocation_key,
+ a_htlc_key: local_keys.a_htlc_key,
+ b_htlc_key: local_keys.b_htlc_key,
+ delayed_payment_key: local_keys.a_delayed_payment_key,
+ feerate_per_kw,
+ htlc_outputs,
+ });
+
+ if let Storage::Local { ref mut latest_per_commitment_point, .. } = self.key_storage {
+ *latest_per_commitment_point = Some(local_keys.per_commitment_point);
+ } else {
+ panic!("Channel somehow ended up with its internal ChannelMonitor being in Watchtower mode?");
+ }
+ }
+
+ /// Provides a payment_hash->payment_preimage mapping. Will be automatically pruned when all
+ /// commitment_tx_infos which contain the payment hash have been revoked.
+ pub(super) fn provide_payment_preimage(&mut self, payment_hash: &PaymentHash, payment_preimage: &PaymentPreimage) {
+ self.payment_preimages.insert(payment_hash.clone(), payment_preimage.clone());
+ }
+
+ /// Combines this ChannelMonitor with the information contained in the other ChannelMonitor.
+ /// After a successful call this ChannelMonitor is up-to-date and is safe to use to monitor the
+ /// chain for new blocks/transactions.
+ pub fn insert_combine(&mut self, mut other: ChannelMonitor) -> Result<(), MonitorUpdateError> {
+ match self.key_storage {
+ Storage::Local { ref funding_info, .. } => {
+ if funding_info.is_none() { return Err(MonitorUpdateError("Try to combine a Local monitor without funding_info")); }
+ let our_funding_info = funding_info;
+ if let Storage::Local { ref funding_info, .. } = other.key_storage {
+ if funding_info.is_none() { return Err(MonitorUpdateError("Try to combine a Local monitor without funding_info")); }
+ // We should be able to compare the entire funding_txo, but in fuzztarget it's trivially
+ // easy to collide the funding_txo hash and have a different scriptPubKey.
+ if funding_info.as_ref().unwrap().0 != our_funding_info.as_ref().unwrap().0 {
+ return Err(MonitorUpdateError("Funding transaction outputs are not identical!"));
+ }
+ } else {
+ return Err(MonitorUpdateError("Try to combine a Local monitor with a Watchtower one !"));
+ }
+ },
+ Storage::Watchtower { .. } => {
+ if let Storage::Watchtower { .. } = other.key_storage {
+ unimplemented!();
+ } else {
+ return Err(MonitorUpdateError("Try to combine a Watchtower monitor with a Local one !"));
+ }
+ },
+ }
+ let other_min_secret = other.get_min_seen_secret();
+ let our_min_secret = self.get_min_seen_secret();
+ if our_min_secret > other_min_secret {
+ self.provide_secret(other_min_secret, other.get_secret(other_min_secret).unwrap())?;
+ }
+ if let Some(ref local_tx) = self.current_local_signed_commitment_tx {
+ if let Some(ref other_local_tx) = other.current_local_signed_commitment_tx {
+ let our_commitment_number = 0xffffffffffff - ((((local_tx.tx.input[0].sequence as u64 & 0xffffff) << 3*8) | (local_tx.tx.lock_time as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
+ let other_commitment_number = 0xffffffffffff - ((((other_local_tx.tx.input[0].sequence as u64 & 0xffffff) << 3*8) | (other_local_tx.tx.lock_time as u64 & 0xffffff)) ^ other.commitment_transaction_number_obscure_factor);
+ if our_commitment_number >= other_commitment_number {
+ self.key_storage = other.key_storage;
+ }
+ }
+ }
+ // TODO: We should use current_remote_commitment_number and the commitment number out of
+ // local transactions to decide how to merge
+ if our_min_secret >= other_min_secret {
+ self.their_cur_revocation_points = other.their_cur_revocation_points;
+ for (txid, htlcs) in other.remote_claimable_outpoints.drain() {
+ self.remote_claimable_outpoints.insert(txid, htlcs);
+ }
+ if let Some(local_tx) = other.prev_local_signed_commitment_tx {
+ self.prev_local_signed_commitment_tx = Some(local_tx);
+ }
+ if let Some(local_tx) = other.current_local_signed_commitment_tx {
+ self.current_local_signed_commitment_tx = Some(local_tx);
+ }
+ self.payment_preimages = other.payment_preimages;
+ self.to_remote_rescue = other.to_remote_rescue;
+ }
+
+ self.current_remote_commitment_number = cmp::min(self.current_remote_commitment_number, other.current_remote_commitment_number);
+ Ok(())
+ }
+
+ /// Panics if commitment_transaction_number_obscure_factor doesn't fit in 48 bits
+ pub(super) fn set_commitment_obscure_factor(&mut self, commitment_transaction_number_obscure_factor: u64) {
+ assert!(commitment_transaction_number_obscure_factor < (1 << 48));
+ self.commitment_transaction_number_obscure_factor = commitment_transaction_number_obscure_factor;
+ }
+
+ /// Allows this monitor to scan only for transactions which are applicable. Note that this is
+ /// optional: without it this monitor cannot be used in an SPV client, but you may wish to
+ /// avoid it (or call unset_funding_info) on a monitor you intend to send to a watchtower, as
+ /// doing so provides slightly better privacy.
+ /// It is the caller's responsibility to register the outpoint and script with the chain
+ /// watcher, passing the former value as the key to add_update_monitor.
+ pub(super) fn set_funding_info(&mut self, new_funding_info: (OutPoint, Script)) {
+ match self.key_storage {
+ Storage::Local { ref mut funding_info, .. } => {
+ *funding_info = Some(new_funding_info);
+ },
+ Storage::Watchtower { .. } => {
+ panic!("Channel somehow ended up with its internal ChannelMonitor being in Watchtower mode?");
+ }
+ }
+ }
+
+ /// We log these base keys at channel opening so as to be able to rebuild the redeemscript in
+ /// case a leaked revoked commitment tx appears onchain.
+ pub(super) fn set_their_base_keys(&mut self, their_htlc_base_key: &PublicKey, their_delayed_payment_base_key: &PublicKey) {
+ self.their_htlc_base_key = Some(their_htlc_base_key.clone());
+ self.their_delayed_payment_base_key = Some(their_delayed_payment_base_key.clone());
+ }
+
+ pub(super) fn set_their_to_self_delay(&mut self, their_to_self_delay: u16) {
+ self.their_to_self_delay = Some(their_to_self_delay);
+ }
+
+ pub(super) fn unset_funding_info(&mut self) {
+ match self.key_storage {
+ Storage::Local { ref mut funding_info, .. } => {
+ *funding_info = None;
+ },
+ Storage::Watchtower { .. } => {
+ panic!("Channel somehow ended up with its internal ChannelMonitor being in Watchtower mode?");
+ },
+ }
+ }
+
+ /// Gets the funding transaction outpoint of the channel this ChannelMonitor is monitoring for.
+ pub fn get_funding_txo(&self) -> Option<OutPoint> {
+ match self.key_storage {
+ Storage::Local { ref funding_info, .. } => {
+ match funding_info {
+ &Some((outpoint, _)) => Some(outpoint),
+ &None => None
+ }
+ },
+ Storage::Watchtower { .. } => {
+ return None;
+ }
+ }
+ }
+
+ /// Gets the set of all outpoints which this ChannelMonitor expects to hear about spends of.
+ /// Generally useful when deserializing, as during normal operation the return values of
+ /// block_connected are sufficient to ensure all relevant outpoints are being monitored (note
+ /// that the get_funding_txo outpoint and transaction must also be monitored for!).
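+ ///
+ /// A rough sketch of re-registering these after deserialization (`chain_watcher` here stands
+ /// in for your ChainWatchInterface, an assumption for illustration):
+ ///
+ /// ```ignore
+ /// for (txid, idx, script) in monitor.get_monitored_outpoints() {
+ ///     chain_watcher.install_watch_outpoint((txid, idx), script);
+ /// }
+ /// ```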
+ pub fn get_monitored_outpoints(&self) -> Vec<(Sha256dHash, u32, &Script)> {
+ let mut res = Vec::with_capacity(self.remote_commitment_txn_on_chain.len() * 2);
+ for (ref txid, &(_, ref outputs)) in self.remote_commitment_txn_on_chain.iter() {
+ for (idx, output) in outputs.iter().enumerate() {
+ res.push(((*txid).clone(), idx as u32, output));
+ }
+ }
+ res
+ }
+
+ /// Serializes this monitor into the given writer, with various modes for the exposed pub fns
+ fn write<W: Writer>(&self, writer: &mut W, for_local_storage: bool) -> Result<(), ::std::io::Error> {
+ //TODO: We still write out all the serialization here manually instead of using the fancy
+ //serialization framework we have, we should migrate things over to it.
+ writer.write_all(&[SERIALIZATION_VERSION; 1])?;
+ writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
+
+ // Set in initial Channel-object creation, so should always be set by now:
+ U48(self.commitment_transaction_number_obscure_factor).write(writer)?;
+
+ macro_rules! write_option {
+ ($thing: expr) => {
+ match $thing {
+ &Some(ref t) => {
+ 1u8.write(writer)?;
+ t.write(writer)?;
+ },
+ &None => 0u8.write(writer)?,
+ }
+ }
+ }
+
+ match self.key_storage {
+ Storage::Local { ref revocation_base_key, ref htlc_base_key, ref delayed_payment_base_key, ref payment_base_key, ref shutdown_pubkey, ref prev_latest_per_commitment_point, ref latest_per_commitment_point, ref funding_info, ref current_remote_commitment_txid, ref prev_remote_commitment_txid } => {
+ writer.write_all(&[0; 1])?;
+ writer.write_all(&revocation_base_key[..])?;
+ writer.write_all(&htlc_base_key[..])?;
+ writer.write_all(&delayed_payment_base_key[..])?;
+ writer.write_all(&payment_base_key[..])?;
+ writer.write_all(&shutdown_pubkey.serialize())?;
+ prev_latest_per_commitment_point.write(writer)?;
+ latest_per_commitment_point.write(writer)?;
+ match funding_info {
+ &Some((ref outpoint, ref script)) => {
+ writer.write_all(&outpoint.txid[..])?;
+ writer.write_all(&byte_utils::be16_to_array(outpoint.index))?;
+ script.write(writer)?;
+ },
+ &None => {
+ debug_assert!(false, "Try to serialize a useless Local monitor !");
+ },
+ }
+ current_remote_commitment_txid.write(writer)?;
+ prev_remote_commitment_txid.write(writer)?;
+ },
+ Storage::Watchtower { .. } => unimplemented!(),
+ }
+
+ writer.write_all(&self.their_htlc_base_key.as_ref().unwrap().serialize())?;
+ writer.write_all(&self.their_delayed_payment_base_key.as_ref().unwrap().serialize())?;
+
+ match self.their_cur_revocation_points {
+ Some((idx, pubkey, second_option)) => {
+ writer.write_all(&byte_utils::be48_to_array(idx))?;
+ writer.write_all(&pubkey.serialize())?;
+ match second_option {
+ Some(second_pubkey) => {
+ writer.write_all(&second_pubkey.serialize())?;
+ },
+ None => {
+ writer.write_all(&[0; 33])?;
+ },
+ }
+ },
+ None => {
+ writer.write_all(&byte_utils::be48_to_array(0))?;
+ },
+ }
+
+ writer.write_all(&byte_utils::be16_to_array(self.our_to_self_delay))?;
+ writer.write_all(&byte_utils::be16_to_array(self.their_to_self_delay.unwrap()))?;
+
+ for &(ref secret, ref idx) in self.old_secrets.iter() {
+ writer.write_all(secret)?;
+ writer.write_all(&byte_utils::be64_to_array(*idx))?;
+ }
+
+ macro_rules! serialize_htlc_in_commitment {
+ ($htlc_output: expr) => {
+ writer.write_all(&[$htlc_output.offered as u8; 1])?;
+ writer.write_all(&byte_utils::be64_to_array($htlc_output.amount_msat))?;
+ writer.write_all(&byte_utils::be32_to_array($htlc_output.cltv_expiry))?;
+ writer.write_all(&$htlc_output.payment_hash.0[..])?;
+ $htlc_output.transaction_output_index.write(writer)?;
+ }
+ }
+
+ writer.write_all(&byte_utils::be64_to_array(self.remote_claimable_outpoints.len() as u64))?;
+ for (ref txid, ref htlc_infos) in self.remote_claimable_outpoints.iter() {
+ writer.write_all(&txid[..])?;
+ writer.write_all(&byte_utils::be64_to_array(htlc_infos.len() as u64))?;
+ for &(ref htlc_output, ref htlc_source) in htlc_infos.iter() {
+ serialize_htlc_in_commitment!(htlc_output);
+ write_option!(htlc_source);
+ }
+ }
+
+ writer.write_all(&byte_utils::be64_to_array(self.remote_commitment_txn_on_chain.len() as u64))?;
+ for (ref txid, &(commitment_number, ref txouts)) in self.remote_commitment_txn_on_chain.iter() {
+ writer.write_all(&txid[..])?;
+ writer.write_all(&byte_utils::be48_to_array(commitment_number))?;
+ (txouts.len() as u64).write(writer)?;
+ for script in txouts.iter() {
+ script.write(writer)?;
+ }
+ }
+
+ if for_local_storage {
+ writer.write_all(&byte_utils::be64_to_array(self.remote_hash_commitment_number.len() as u64))?;
+ for (ref payment_hash, commitment_number) in self.remote_hash_commitment_number.iter() {
+ writer.write_all(&payment_hash.0[..])?;
+ writer.write_all(&byte_utils::be48_to_array(*commitment_number))?;
+ }
+ } else {
+ writer.write_all(&byte_utils::be64_to_array(0))?;
+ }
+
+ macro_rules! serialize_local_tx {
+ ($local_tx: expr) => {
+ if let Err(e) = $local_tx.tx.consensus_encode(&mut WriterWriteAdaptor(writer)) {
+ match e {
+ encode::Error::Io(e) => return Err(e),
+ _ => panic!("local tx must have been well-formed!"),
+ }
+ }
+
+ writer.write_all(&$local_tx.revocation_key.serialize())?;
+ writer.write_all(&$local_tx.a_htlc_key.serialize())?;
+ writer.write_all(&$local_tx.b_htlc_key.serialize())?;
+ writer.write_all(&$local_tx.delayed_payment_key.serialize())?;
+
+ writer.write_all(&byte_utils::be64_to_array($local_tx.feerate_per_kw))?;
+ writer.write_all(&byte_utils::be64_to_array($local_tx.htlc_outputs.len() as u64))?;
+ for &(ref htlc_output, ref sigs, ref htlc_source) in $local_tx.htlc_outputs.iter() {
+ serialize_htlc_in_commitment!(htlc_output);
+ if let &Some((ref their_sig, ref our_sig)) = sigs {
+ 1u8.write(writer)?;
+ writer.write_all(&their_sig.serialize_compact())?;
+ writer.write_all(&our_sig.serialize_compact())?;
+ } else {
+ 0u8.write(writer)?;
+ }
+ write_option!(htlc_source);
+ }
+ }
+ }
+
+ if let Some(ref prev_local_tx) = self.prev_local_signed_commitment_tx {
+ writer.write_all(&[1; 1])?;
+ serialize_local_tx!(prev_local_tx);
+ } else {
+ writer.write_all(&[0; 1])?;
+ }
+
+ if let Some(ref cur_local_tx) = self.current_local_signed_commitment_tx {
+ writer.write_all(&[1; 1])?;
+ serialize_local_tx!(cur_local_tx);
+ } else {
+ writer.write_all(&[0; 1])?;
+ }
+
+ if for_local_storage {
+ writer.write_all(&byte_utils::be48_to_array(self.current_remote_commitment_number))?;
+ } else {
+ writer.write_all(&byte_utils::be48_to_array(0))?;
+ }
+
+ writer.write_all(&byte_utils::be64_to_array(self.payment_preimages.len() as u64))?;
+ for payment_preimage in self.payment_preimages.values() {
+ writer.write_all(&payment_preimage.0[..])?;
+ }
+
+ self.last_block_hash.write(writer)?;
+ self.destination_script.write(writer)?;
+ if let Some((ref to_remote_script, ref local_key)) = self.to_remote_rescue {
+ writer.write_all(&[1; 1])?;
+ to_remote_script.write(writer)?;
+ local_key.write(writer)?;
+ } else {
+ writer.write_all(&[0; 1])?;
+ }
+
+ writer.write_all(&byte_utils::be64_to_array(self.our_claim_txn_waiting_first_conf.len() as u64))?;
+ for (ref outpoint, claim_tx_data) in self.our_claim_txn_waiting_first_conf.iter() {
+ outpoint.write(writer)?;
+ writer.write_all(&byte_utils::be32_to_array(claim_tx_data.0))?;
+ match claim_tx_data.1 {
+ TxMaterial::Revoked { ref script, ref pubkey, ref key, ref is_htlc, ref amount} => {
+ writer.write_all(&[0; 1])?;
+ script.write(writer)?;
+ pubkey.write(writer)?;
+ writer.write_all(&key[..])?;
+ if *is_htlc {
+ writer.write_all(&[0; 1])?;
+ } else {
+ writer.write_all(&[1; 1])?;
+ }
+ writer.write_all(&byte_utils::be64_to_array(*amount))?;
+ },
+ TxMaterial::RemoteHTLC { ref script, ref key, ref preimage, ref amount } => {
+ writer.write_all(&[1; 1])?;
+ script.write(writer)?;
+ key.write(writer)?;
+ preimage.write(writer)?;
+ writer.write_all(&byte_utils::be64_to_array(*amount))?;
+ },
+ TxMaterial::LocalHTLC { ref script, ref sigs, ref preimage, ref amount } => {
+ writer.write_all(&[2; 1])?;
+ script.write(writer)?;
+ sigs.0.write(writer)?;
+ sigs.1.write(writer)?;
+ preimage.write(writer)?;
+ writer.write_all(&byte_utils::be64_to_array(*amount))?;
+ }
+ }
+ writer.write_all(&byte_utils::be64_to_array(claim_tx_data.2))?;
+ writer.write_all(&byte_utils::be32_to_array(claim_tx_data.3))?;
+ writer.write_all(&byte_utils::be32_to_array(claim_tx_data.4))?;
+ }
+
+ writer.write_all(&byte_utils::be64_to_array(self.onchain_events_waiting_threshold_conf.len() as u64))?;
+ for (ref target, ref events) in self.onchain_events_waiting_threshold_conf.iter() {
+ writer.write_all(&byte_utils::be32_to_array(**target))?;
+ writer.write_all(&byte_utils::be64_to_array(events.len() as u64))?;
+ for ev in events.iter() {
+ match *ev {
+ OnchainEvent::Claim { ref outpoint } => {
+ writer.write_all(&[0; 1])?;
+ outpoint.write(writer)?;
+ },
+ OnchainEvent::HTLCUpdate { ref htlc_update } => {
+ writer.write_all(&[1; 1])?;
+ htlc_update.0.write(writer)?;
+ htlc_update.1.write(writer)?;
+ }
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ /// Writes this monitor into the given writer, suitable for writing to disk.
+ ///
+ /// Note that the deserializer is only implemented for (Sha256dHash, ChannelMonitor), which
+ /// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
+ /// the "reorg path" (ie not just starting at the same height but starting at the highest
+ /// common block that appears on your best chain as well as on the chain which contains the
+ /// last block hash returned) upon deserializing the object!
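+ ///
+ /// A rough sketch of the round trip (the `file` and `bytes` values, and a `Writer` impl for
+ /// `file`, are assumptions for illustration):
+ ///
+ /// ```ignore
+ /// monitor.write_for_disk(&mut file)?;
+ /// // ...later, on restart:
+ /// let (last_block_hash, monitor) =
+ ///     <(Sha256dHash, ChannelMonitor)>::read(&mut Cursor::new(&bytes), logger)?;
+ /// // Rescan the chain from the highest block common to last_block_hash's chain and the
+ /// // current best chain before relying on `monitor`.
+ /// ```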
+ pub fn write_for_disk<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ self.write(writer, true)
+ }
+
+ /// Encodes this monitor into the given writer, suitable for sending to a remote watchtower
+ ///
+ /// Note that the deserializer is only implemented for (Sha256dHash, ChannelMonitor), which
+ /// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
+ /// the "reorg path" (ie not just starting at the same height but starting at the highest
+ /// common block that appears on your best chain as well as on the chain which contains the
+ /// last block hash returned) upon deserializing the object!
+ pub fn write_for_watchtower<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ self.write(writer, false)
+ }
+
+ /// Can only fail if idx is < get_min_seen_secret
+ pub(super) fn get_secret(&self, idx: u64) -> Option<[u8; 32]> {
+ for i in 0..self.old_secrets.len() {
+ if (idx & (!((1 << i) - 1))) == self.old_secrets[i].1 {
+ return Some(ChannelMonitor::derive_secret(self.old_secrets[i].0, i as u8, idx))
+ }
+ }
+ assert!(idx < self.get_min_seen_secret());
+ None
+ }
+
+ pub(super) fn get_min_seen_secret(&self) -> u64 {
+ //TODO This can be optimized?
+ let mut min = 1 << 48;
+ for &(_, idx) in self.old_secrets.iter() {
+ if idx < min {
+ min = idx;
+ }
+ }
+ min
+ }
+
+ pub(super) fn get_cur_remote_commitment_number(&self) -> u64 {
+ self.current_remote_commitment_number
+ }
+
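+ /// Recovers the current local commitment number from the obscured value encoded in the local
+ /// commitment tx (sequence holds the upper 3 bytes, lock_time the lower 3, XORed with the
+ /// obscure factor, per BOLT 3).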
+ pub(super) fn get_cur_local_commitment_number(&self) -> u64 {
+ if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
+ 0xffff_ffff_ffff - ((((local_tx.tx.input[0].sequence as u64 & 0xffffff) << 3*8) | (local_tx.tx.lock_time as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor)
+ } else { 0xffff_ffff_ffff }
+ }
+
+ /// Attempts to claim a remote commitment transaction's outputs using the revocation key and
+ /// data in remote_claimable_outpoints. Will directly claim any HTLC outputs which expire at a
+ /// height > height + CLTV_SHARED_CLAIM_BUFFER. In any case, will install monitoring for
+ /// HTLC-Success/HTLC-Timeout transactions.
+ /// Returns updates for HTLCs pending in the channel which were failed automatically by the
+ /// broadcast of a revoked remote commitment tx.
+ fn check_spend_remote_transaction(&mut self, tx: &Transaction, height: u32, fee_estimator: &FeeEstimator) -> (Vec<Transaction>, (Sha256dHash, Vec<TxOut>), Vec<SpendableOutputDescriptor>) {
+ // Most secp and related errors trying to create keys mean we have no hope of constructing
+ // a spend transaction...so we return no transactions to broadcast
+ let mut txn_to_broadcast = Vec::new();
+ let mut watch_outputs = Vec::new();
+ let mut spendable_outputs = Vec::new();
+
+ let commitment_txid = tx.txid(); //TODO: This is gonna be a performance bottleneck for watchtowers!
+ let per_commitment_option = self.remote_claimable_outpoints.get(&commitment_txid);
+
+ macro_rules! ignore_error {
+ ( $thing : expr ) => {
+ match $thing {
+ Ok(a) => a,
+ Err(_) => return (txn_to_broadcast, (commitment_txid, watch_outputs), spendable_outputs)
+ }
+ };
+ }
+
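+ // Recover this tx's commitment number from its obscured sequence/lock_time encoding (see
+ // BOLT 3); if it's one we've already seen the secret for, it's a revoked commitment tx.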
+ let commitment_number = 0xffffffffffff - ((((tx.input[0].sequence as u64 & 0xffffff) << 3*8) | (tx.lock_time as u64 & 0xffffff)) ^ self.commitment_transaction_number_obscure_factor);
+ if commitment_number >= self.get_min_seen_secret() {
+ let secret = self.get_secret(commitment_number).unwrap();
+ let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
+ let (revocation_pubkey, b_htlc_key, local_payment_key) = match self.key_storage {
+ Storage::Local { ref revocation_base_key, ref htlc_base_key, ref payment_base_key, .. } => {
+ let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);
+ (ignore_error!(chan_utils::derive_public_revocation_key(&self.secp_ctx, &per_commitment_point, &PublicKey::from_secret_key(&self.secp_ctx, &revocation_base_key))),
+ ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &per_commitment_point, &PublicKey::from_secret_key(&self.secp_ctx, &htlc_base_key))),
+ Some(ignore_error!(chan_utils::derive_private_key(&self.secp_ctx, &per_commitment_point, &payment_base_key))))
+ },
+ Storage::Watchtower { ref revocation_base_key, ref htlc_base_key, .. } => {
+ let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);
+ (ignore_error!(chan_utils::derive_public_revocation_key(&self.secp_ctx, &per_commitment_point, &revocation_base_key)),
+ ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &per_commitment_point, &htlc_base_key)),
+ None)
+ },
+ };
+ let delayed_key = ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key), &self.their_delayed_payment_base_key.unwrap()));
+ let a_htlc_key = match self.their_htlc_base_key {
+ None => return (txn_to_broadcast, (commitment_txid, watch_outputs), spendable_outputs),
+ Some(their_htlc_base_key) => ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key), &their_htlc_base_key)),
+ };
+
+ let revokeable_redeemscript = chan_utils::get_revokeable_redeemscript(&revocation_pubkey, self.our_to_self_delay, &delayed_key);
+ let revokeable_p2wsh = revokeable_redeemscript.to_v0_p2wsh();
+
+ let local_payment_p2wpkh = if let Some(payment_key) = local_payment_key {
+ // Note that the Network here is ignored as we immediately drop the address for the
+ // script_pubkey version.
+ let payment_hash160 = Hash160::hash(&PublicKey::from_secret_key(&self.secp_ctx, &payment_key).serialize());
+ Some(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&payment_hash160[..]).into_script())
+ } else { None };
+
+ let mut total_value = 0;
+ let mut inputs = Vec::new();
+ let mut inputs_info = Vec::new();
+ let mut inputs_desc = Vec::new();
+
+ for (idx, outp) in tx.output.iter().enumerate() {
+ if outp.script_pubkey == revokeable_p2wsh {
+ inputs.push(TxIn {
+ previous_output: BitcoinOutPoint {
+ txid: commitment_txid,
+ vout: idx as u32,
+ },
+ script_sig: Script::new(),
+ sequence: 0xfffffffd,
+ witness: Vec::new(),
+ });
+ inputs_desc.push(InputDescriptors::RevokedOutput);
+ inputs_info.push((None, outp.value, self.our_to_self_delay as u32));
+ total_value += outp.value;
+ } else if Some(&outp.script_pubkey) == local_payment_p2wpkh.as_ref() {
+ spendable_outputs.push(SpendableOutputDescriptor::DynamicOutputP2WPKH {
+ outpoint: BitcoinOutPoint { txid: commitment_txid, vout: idx as u32 },
+ key: local_payment_key.unwrap(),
+ output: outp.clone(),
+ });
+ }
+ }
+
+ macro_rules! sign_input {
+ ($sighash_parts: expr, $input: expr, $htlc_idx: expr, $amount: expr) => {
+ {
+ let (sig, redeemscript, revocation_key) = match self.key_storage {
+ Storage::Local { ref revocation_base_key, .. } => {
+ let redeemscript = if $htlc_idx.is_none() { revokeable_redeemscript.clone() } else {
+ let htlc = &per_commitment_option.unwrap()[$htlc_idx.unwrap()].0;
+ chan_utils::get_htlc_redeemscript_with_explicit_keys(htlc, &a_htlc_key, &b_htlc_key, &revocation_pubkey)
+ };
+ let sighash = hash_to_message!(&$sighash_parts.sighash_all(&$input, &redeemscript, $amount)[..]);
+ let revocation_key = ignore_error!(chan_utils::derive_private_revocation_key(&self.secp_ctx, &per_commitment_key, &revocation_base_key));
+ (self.secp_ctx.sign(&sighash, &revocation_key), redeemscript, revocation_key)
+ },
+ Storage::Watchtower { .. } => {
+ unimplemented!();
+ }
+ };
+ $input.witness.push(sig.serialize_der().to_vec());
+ $input.witness[0].push(SigHashType::All as u8);
+ if $htlc_idx.is_none() {
+ $input.witness.push(vec!(1));
+ } else {
+ $input.witness.push(revocation_pubkey.serialize().to_vec());
+ }
+ $input.witness.push(redeemscript.clone().into_bytes());
+ (redeemscript, revocation_key)
+ }
+ }
+ }
+
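+ // Claims on HTLC outputs are normally aggregated into the single justice transaction
+ // built below, but an HTLC whose expiry is within CLTV_SHARED_CLAIM_BUFFER of the current
+ // height gets its own claim transaction, so that a counterparty racing us on that one
+ // output cannot invalidate the entire aggregated claim.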
+ if let Some(ref per_commitment_data) = per_commitment_option {
+ inputs.reserve_exact(per_commitment_data.len());
+
+ for (idx, &(ref htlc, _)) in per_commitment_data.iter().enumerate() {
+ if let Some(transaction_output_index) = htlc.transaction_output_index {
+ let expected_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &a_htlc_key, &b_htlc_key, &revocation_pubkey);
+ if transaction_output_index as usize >= tx.output.len() ||
+ tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 ||
+ tx.output[transaction_output_index as usize].script_pubkey != expected_script.to_v0_p2wsh() {
+ return (txn_to_broadcast, (commitment_txid, watch_outputs), spendable_outputs); // Corrupted per_commitment_data; give up on this channel
+ }
+ let input = TxIn {
+ previous_output: BitcoinOutPoint {
+ txid: commitment_txid,
+ vout: transaction_output_index,
+ },
+ script_sig: Script::new(),
+ sequence: 0xfffffffd,
+ witness: Vec::new(),
+ };
+ if htlc.cltv_expiry > height + CLTV_SHARED_CLAIM_BUFFER {
+ inputs.push(input);
+ inputs_desc.push(if htlc.offered { InputDescriptors::RevokedOfferedHTLC } else { InputDescriptors::RevokedReceivedHTLC });
+ inputs_info.push((Some(idx), tx.output[transaction_output_index as usize].value, htlc.cltv_expiry));
+ total_value += tx.output[transaction_output_index as usize].value;
+ } else {
+ let mut single_htlc_tx = Transaction {
+ version: 2,
+ lock_time: 0,
+ input: vec![input],
+ output: vec!(TxOut {
+ script_pubkey: self.destination_script.clone(),
+ value: htlc.amount_msat / 1000,
+ }),
+ };
+ let predicted_weight = single_htlc_tx.get_weight() + Self::get_witnesses_weight(&[if htlc.offered { InputDescriptors::RevokedOfferedHTLC } else { InputDescriptors::RevokedReceivedHTLC }]);
+ let height_timer = Self::get_height_timer(height, htlc.cltv_expiry);
+ let mut used_feerate;
+ if subtract_high_prio_fee!(self, fee_estimator, single_htlc_tx.output[0].value, predicted_weight, tx.txid(), used_feerate) {
+ let sighash_parts = bip143::SighashComponents::new(&single_htlc_tx);
+ let (redeemscript, revocation_key) = sign_input!(sighash_parts, single_htlc_tx.input[0], Some(idx), htlc.amount_msat / 1000);
+ assert!(predicted_weight >= single_htlc_tx.get_weight());
+ match self.our_claim_txn_waiting_first_conf.entry(single_htlc_tx.input[0].previous_output.clone()) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert((height_timer, TxMaterial::Revoked { script: redeemscript, pubkey: Some(revocation_pubkey), key: revocation_key, is_htlc: true, amount: htlc.amount_msat / 1000 }, used_feerate, htlc.cltv_expiry, height)); }
+ }
+ txn_to_broadcast.push(single_htlc_tx);
+ }
+ }
+ }
+ }
+ }
+
+ if !inputs.is_empty() || !txn_to_broadcast.is_empty() || per_commitment_option.is_some() {
+ // i.e. we're confident this is a revoked remote commitment transaction on our channel
+ log_trace!(self, "Got broadcast of revoked remote commitment transaction, generating general spend tx with {} inputs and {} other txn to broadcast", inputs.len(), txn_to_broadcast.len());
+ watch_outputs.append(&mut tx.output.clone());
+ self.remote_commitment_txn_on_chain.insert(commitment_txid, (commitment_number, tx.output.iter().map(|output| { output.script_pubkey.clone() }).collect()));
+
+ macro_rules! check_htlc_fails {
+ ($txid: expr, $commitment_tx: expr) => {
+ if let Some(ref outpoints) = self.remote_claimable_outpoints.get($txid) {
+ for &(ref htlc, ref source_option) in outpoints.iter() {
+ if let &Some(ref source) = source_option {
+ log_info!(self, "Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of revoked remote commitment transaction, waiting for confirmation (at height {})", log_bytes!(htlc.payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
+ match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+ hash_map::Entry::Occupied(mut entry) => {
+ let e = entry.get_mut();
+ e.retain(|ref event| {
+ match **event {
+ OnchainEvent::HTLCUpdate { ref htlc_update } => {
+ return htlc_update.0 != **source
+ },
+ _ => return true
+ }
+ });
+ e.push(OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())});
+ }
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())}]);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if let Storage::Local { ref current_remote_commitment_txid, ref prev_remote_commitment_txid, .. } = self.key_storage {
+ if let &Some(ref txid) = current_remote_commitment_txid {
+ check_htlc_fails!(txid, "current");
+ }
+ if let &Some(ref txid) = prev_remote_commitment_txid {
+ check_htlc_fails!(txid, "remote");
+ }
+ }
+ // No need to check local commitment txn, symmetric HTLCSource must be present as per-htlc data on remote commitment tx
+ }
+ if inputs.is_empty() { return (txn_to_broadcast, (commitment_txid, watch_outputs), spendable_outputs); } // Nothing to be done...probably a false positive/local tx
+
+ let outputs = vec!(TxOut {
+ script_pubkey: self.destination_script.clone(),
+ value: total_value,
+ });
+ let mut spend_tx = Transaction {
+ version: 2,
+ lock_time: 0,
+ input: inputs,
+ output: outputs,
+ };
+
+ let predicted_weight = spend_tx.get_weight() + Self::get_witnesses_weight(&inputs_desc[..]);
+
+ let mut used_feerate;
+ if !subtract_high_prio_fee!(self, fee_estimator, spend_tx.output[0].value, predicted_weight, tx.txid(), used_feerate) {
+ return (txn_to_broadcast, (commitment_txid, watch_outputs), spendable_outputs);
+ }
+
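+ // SighashComponents caches the BIP 143 hash_prevouts/hash_sequence/hash_outputs
+ // midstates, which are shared across every input we sign in this transaction.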
+ let sighash_parts = bip143::SighashComponents::new(&spend_tx);
+
+ for (input, info) in spend_tx.input.iter_mut().zip(inputs_info.iter()) {
+ let (redeemscript, revocation_key) = sign_input!(sighash_parts, input, info.0, info.1);
+ let height_timer = Self::get_height_timer(height, info.2);
+ match self.our_claim_txn_waiting_first_conf.entry(input.previous_output.clone()) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert((height_timer, TxMaterial::Revoked { script: redeemscript, pubkey: if info.0.is_some() { Some(revocation_pubkey) } else { None }, key: revocation_key, is_htlc: info.0.is_some(), amount: info.1 }, used_feerate, if info.0.is_none() { height + info.2 } else { info.2 }, height)); }
+ }
+ }
+ assert!(predicted_weight >= spend_tx.get_weight());
+
+ spendable_outputs.push(SpendableOutputDescriptor::StaticOutput {
+ outpoint: BitcoinOutPoint { txid: spend_tx.txid(), vout: 0 },
+ output: spend_tx.output[0].clone(),
+ });
+ txn_to_broadcast.push(spend_tx);
+ } else if let Some(per_commitment_data) = per_commitment_option {
+ // While this isn't useful yet, there is a potential race where if a counterparty
+ // revokes a state at the same time as the commitment transaction for that state is
+ // confirmed, and the watchtower receives the block before the user, the user could
+ // upload a new ChannelMonitor with the revocation secret but the watchtower has
+ // already processed the block, resulting in the remote_commitment_txn_on_chain entry
+ // not being generated by the above conditional. Thus, to be safe, we go ahead and
+ // insert it here.
+ watch_outputs.append(&mut tx.output.clone());
+ self.remote_commitment_txn_on_chain.insert(commitment_txid, (commitment_number, tx.output.iter().map(|output| { output.script_pubkey.clone() }).collect()));
+
+ log_trace!(self, "Got broadcast of non-revoked remote commitment transaction {}", commitment_txid);
+
+ macro_rules! check_htlc_fails {
+ ($txid: expr, $commitment_tx: expr, $id: tt) => {
+ if let Some(ref latest_outpoints) = self.remote_claimable_outpoints.get($txid) {
+ $id: for &(ref htlc, ref source_option) in latest_outpoints.iter() {
+ if let &Some(ref source) = source_option {
+ // Check whether the HTLC is present in the commitment transaction that was
+ // broadcast. If it was below the dust limit it won't be, and we should
+ // fail it backwards immediately as there is no way for us to learn the
+ // payment_preimage.
+ // Note that if the dust limit were allowed to change between
+ // commitment transactions we'd want to check whether *any*
+ // broadcastable commitment transaction has the HTLC in it, but it
+ // cannot currently change after channel initialization, so we don't
+ // need to do so here.
+ for &(ref broadcast_htlc, ref broadcast_source) in per_commitment_data.iter() {
+ if broadcast_htlc.transaction_output_index.is_some() && Some(source) == broadcast_source.as_ref() {
+ continue $id;
+ }
+ }
+ log_trace!(self, "Failing HTLC with payment_hash {} from {} remote commitment tx due to broadcast of remote commitment transaction", log_bytes!(htlc.payment_hash.0), $commitment_tx);
+ match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+ hash_map::Entry::Occupied(mut entry) => {
+ let e = entry.get_mut();
+ e.retain(|ref event| {
+ match **event {
+ OnchainEvent::HTLCUpdate { ref htlc_update } => {
+ return htlc_update.0 != **source
+ },
+ _ => return true
+ }
+ });
+ e.push(OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())});
+ }
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: ((**source).clone(), htlc.payment_hash.clone())}]);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ if let Storage::Local { ref current_remote_commitment_txid, ref prev_remote_commitment_txid, .. } = self.key_storage {
+ if let &Some(ref txid) = current_remote_commitment_txid {
+ check_htlc_fails!(txid, "current", 'current_loop);
+ }
+ if let &Some(ref txid) = prev_remote_commitment_txid {
+ check_htlc_fails!(txid, "previous", 'prev_loop);
+ }
+ }
+
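+ // A counterparty can only broadcast a commitment transaction we haven't yet received a
+ // revocation for: the current one or, if the current one's revocation is still pending,
+ // the previous one. We thus track up to two candidate per-commitment points and select
+ // by commitment number.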
+ if let Some(revocation_points) = self.their_cur_revocation_points {
+ let revocation_point_option =
+ if revocation_points.0 == commitment_number { Some(&revocation_points.1) }
+ else if let Some(point) = revocation_points.2.as_ref() {
+ if revocation_points.0 == commitment_number + 1 { Some(point) } else { None }
+ } else { None };
+ if let Some(revocation_point) = revocation_point_option {
+ let (revocation_pubkey, b_htlc_key) = match self.key_storage {
+ Storage::Local { ref revocation_base_key, ref htlc_base_key, .. } => {
+ (ignore_error!(chan_utils::derive_public_revocation_key(&self.secp_ctx, revocation_point, &PublicKey::from_secret_key(&self.secp_ctx, &revocation_base_key))),
+ ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, revocation_point, &PublicKey::from_secret_key(&self.secp_ctx, &htlc_base_key))))
+ },
+ Storage::Watchtower { ref revocation_base_key, ref htlc_base_key, .. } => {
+ (ignore_error!(chan_utils::derive_public_revocation_key(&self.secp_ctx, revocation_point, &revocation_base_key)),
+ ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, revocation_point, &htlc_base_key)))
+ },
+ };
+ let a_htlc_key = match self.their_htlc_base_key {
+ None => return (txn_to_broadcast, (commitment_txid, watch_outputs), spendable_outputs),
+ Some(their_htlc_base_key) => ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, revocation_point, &their_htlc_base_key)),
+ };
+
+ for (idx, outp) in tx.output.iter().enumerate() {
+ if outp.script_pubkey.is_v0_p2wpkh() {
+ match self.key_storage {
+ Storage::Local { ref payment_base_key, .. } => {
+ if let Ok(local_key) = chan_utils::derive_private_key(&self.secp_ctx, &revocation_point, &payment_base_key) {
+ spendable_outputs.push(SpendableOutputDescriptor::DynamicOutputP2WPKH {
+ outpoint: BitcoinOutPoint { txid: commitment_txid, vout: idx as u32 },
+ key: local_key,
+ output: outp.clone(),
+ });
+ }
+ },
+ Storage::Watchtower { .. } => {}
+ }
+ break; // Only to_remote output is claimable
+ }
+ }
+
+ let mut total_value = 0;
+ let mut inputs = Vec::new();
+ let mut inputs_desc = Vec::new();
+ let mut inputs_info = Vec::new();
+
+ macro_rules! sign_input {
+ ($sighash_parts: expr, $input: expr, $amount: expr, $preimage: expr) => {
+ {
+ let (sig, redeemscript, htlc_key) = match self.key_storage {
+ Storage::Local { ref htlc_base_key, .. } => {
+ let htlc = &per_commitment_option.unwrap()[$input.sequence as usize].0;
+ let redeemscript = chan_utils::get_htlc_redeemscript_with_explicit_keys(htlc, &a_htlc_key, &b_htlc_key, &revocation_pubkey);
+ let sighash = hash_to_message!(&$sighash_parts.sighash_all(&$input, &redeemscript, $amount)[..]);
+ let htlc_key = ignore_error!(chan_utils::derive_private_key(&self.secp_ctx, revocation_point, &htlc_base_key));
+ (self.secp_ctx.sign(&sighash, &htlc_key), redeemscript, htlc_key)
+ },
+ Storage::Watchtower { .. } => {
+ unimplemented!();
+ }
+ };
+ $input.witness.push(sig.serialize_der().to_vec());
+ $input.witness[0].push(SigHashType::All as u8);
+ $input.witness.push($preimage);
+ $input.witness.push(redeemscript.clone().into_bytes());
+ (redeemscript, htlc_key)
+ }
+ }
+ }
+
+ for (idx, &(ref htlc, _)) in per_commitment_data.iter().enumerate() {
+ if let Some(transaction_output_index) = htlc.transaction_output_index {
+ let expected_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &a_htlc_key, &b_htlc_key, &revocation_pubkey);
+ if transaction_output_index as usize >= tx.output.len() ||
+ tx.output[transaction_output_index as usize].value != htlc.amount_msat / 1000 ||
+ tx.output[transaction_output_index as usize].script_pubkey != expected_script.to_v0_p2wsh() {
+ return (txn_to_broadcast, (commitment_txid, watch_outputs), spendable_outputs); // Corrupted per_commitment_data; give up on this channel
+ }
+ if let Some(payment_preimage) = self.payment_preimages.get(&htlc.payment_hash) {
- let predicted_weight = single_htlc_tx.get_weight() + Self::get_witnesses_weight(&[if htlc.offered { InputDescriptors::OfferedHTLC } else { InputDescriptors::ReceivedHTLC }]);
- let height_timer = Self::get_height_timer(height, htlc.cltv_expiry);
- let mut used_feerate;
- if subtract_high_prio_fee!(self, fee_estimator, single_htlc_tx.output[0].value, predicted_weight, tx.txid(), used_feerate) {
- let sighash_parts = bip143::SighashComponents::new(&single_htlc_tx);
- let (redeemscript, htlc_key) = sign_input!(sighash_parts, single_htlc_tx.input[0], htlc.amount_msat / 1000, payment_preimage.0.to_vec());
- assert!(predicted_weight >= single_htlc_tx.get_weight());
- spendable_outputs.push(SpendableOutputDescriptor::StaticOutput {
- outpoint: BitcoinOutPoint { txid: single_htlc_tx.txid(), vout: 0 },
- output: single_htlc_tx.output[0].clone(),
- });
- match self.our_claim_txn_waiting_first_conf.entry(single_htlc_tx.input[0].previous_output.clone()) {
- hash_map::Entry::Occupied(_) => {},
- hash_map::Entry::Vacant(entry) => { entry.insert((height_timer, TxMaterial::RemoteHTLC { script: redeemscript, key: htlc_key, preimage: Some(*payment_preimage), amount: htlc.amount_msat / 1000 }, used_feerate, htlc.cltv_expiry, height)); }
++ if htlc.offered {
++ let input = TxIn {
++ previous_output: BitcoinOutPoint {
++ txid: commitment_txid,
++ vout: transaction_output_index,
++ },
++ script_sig: Script::new(),
++ sequence: idx as u32, // reset to 0xfffffffd in sign_input
++ witness: Vec::new(),
+ };
- txn_to_broadcast.push(single_htlc_tx);
++ if htlc.cltv_expiry > height + CLTV_SHARED_CLAIM_BUFFER {
++ inputs.push(input);
++ inputs_desc.push(if htlc.offered { InputDescriptors::OfferedHTLC } else { InputDescriptors::ReceivedHTLC });
++ inputs_info.push((payment_preimage, tx.output[transaction_output_index as usize].value, htlc.cltv_expiry));
++ total_value += tx.output[transaction_output_index as usize].value;
++ } else {
++ let mut single_htlc_tx = Transaction {
++ version: 2,
++ lock_time: 0,
++ input: vec![input],
++ output: vec!(TxOut {
++ script_pubkey: self.destination_script.clone(),
++ value: htlc.amount_msat / 1000,
++ }),
++ };
++ let predicted_weight = single_htlc_tx.get_weight() + Self::get_witnesses_weight(&[if htlc.offered { InputDescriptors::OfferedHTLC } else { InputDescriptors::ReceivedHTLC }]);
++ let height_timer = Self::get_height_timer(height, htlc.cltv_expiry);
++ let mut used_feerate;
++ if subtract_high_prio_fee!(self, fee_estimator, single_htlc_tx.output[0].value, predicted_weight, tx.txid(), used_feerate) {
++ let sighash_parts = bip143::SighashComponents::new(&single_htlc_tx);
++ let (redeemscript, htlc_key) = sign_input!(sighash_parts, single_htlc_tx.input[0], htlc.amount_msat / 1000, payment_preimage.0.to_vec());
++ assert!(predicted_weight >= single_htlc_tx.get_weight());
++ spendable_outputs.push(SpendableOutputDescriptor::StaticOutput {
++ outpoint: BitcoinOutPoint { txid: single_htlc_tx.txid(), vout: 0 },
++ output: single_htlc_tx.output[0].clone(),
++ });
++ match self.our_claim_txn_waiting_first_conf.entry(single_htlc_tx.input[0].previous_output.clone()) {
++ hash_map::Entry::Occupied(_) => {},
++ hash_map::Entry::Vacant(entry) => { entry.insert((height_timer, TxMaterial::RemoteHTLC { script: redeemscript, key: htlc_key, preimage: Some(*payment_preimage), amount: htlc.amount_msat / 1000 }, used_feerate, htlc.cltv_expiry, height)); }
++ }
++ txn_to_broadcast.push(single_htlc_tx);
+ }
- let mut predicted_weight = spend_tx.get_weight() + Self::get_witnesses_weight(&inputs_desc[..]);
+ }
+ }
+ }
+ if !htlc.offered {
+ // TODO: If the HTLC has already expired, potentially merge it with the
+ // rest of the claim transaction, as above.
+ let input = TxIn {
+ previous_output: BitcoinOutPoint {
+ txid: commitment_txid,
+ vout: transaction_output_index,
+ },
+ script_sig: Script::new(),
+ sequence: idx as u32,
+ witness: Vec::new(),
+ };
+ let mut timeout_tx = Transaction {
+ version: 2,
+ lock_time: htlc.cltv_expiry,
+ input: vec![input],
+ output: vec!(TxOut {
+ script_pubkey: self.destination_script.clone(),
+ value: htlc.amount_msat / 1000,
+ }),
+ };
+ let predicted_weight = timeout_tx.get_weight() + Self::get_witnesses_weight(&[InputDescriptors::ReceivedHTLC]);
+ let height_timer = Self::get_height_timer(height, htlc.cltv_expiry);
+ let mut used_feerate;
+ if subtract_high_prio_fee!(self, fee_estimator, timeout_tx.output[0].value, predicted_weight, tx.txid(), used_feerate) {
+ let sighash_parts = bip143::SighashComponents::new(&timeout_tx);
+ let (redeemscript, htlc_key) = sign_input!(sighash_parts, timeout_tx.input[0], htlc.amount_msat / 1000, vec![0]);
+ assert!(predicted_weight >= timeout_tx.get_weight());
+ //TODO: track SpendableOutputDescriptor
+ match self.our_claim_txn_waiting_first_conf.entry(timeout_tx.input[0].previous_output.clone()) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert((height_timer, TxMaterial::RemoteHTLC { script : redeemscript, key: htlc_key, preimage: None, amount: htlc.amount_msat / 1000 }, used_feerate, htlc.cltv_expiry, height)); }
+ }
+ }
+ txn_to_broadcast.push(timeout_tx);
+ }
+ }
+ }
+
+ if inputs.is_empty() { return (txn_to_broadcast, (commitment_txid, watch_outputs), spendable_outputs); } // Nothing to be done...probably a false positive/local tx
+
+ let outputs = vec!(TxOut {
+ script_pubkey: self.destination_script.clone(),
+ value: total_value
+ });
+ let mut spend_tx = Transaction {
+ version: 2,
+ lock_time: 0,
+ input: inputs,
+ output: outputs,
+ };
+
++ let predicted_weight = spend_tx.get_weight() + Self::get_witnesses_weight(&inputs_desc[..]);
+
+ let mut used_feerate;
+ if !subtract_high_prio_fee!(self, fee_estimator, spend_tx.output[0].value, predicted_weight, tx.txid(), used_feerate) {
+ return (txn_to_broadcast, (commitment_txid, watch_outputs), spendable_outputs);
+ }
+
+ let sighash_parts = bip143::SighashComponents::new(&spend_tx);
+
+ for (input, info) in spend_tx.input.iter_mut().zip(inputs_info.iter()) {
+ let (redeemscript, htlc_key) = sign_input!(sighash_parts, input, info.1, (info.0).0.to_vec());
+ let height_timer = Self::get_height_timer(height, info.2);
+ match self.our_claim_txn_waiting_first_conf.entry(input.previous_output.clone()) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert((height_timer, TxMaterial::RemoteHTLC { script: redeemscript, key: htlc_key, preimage: Some(*(info.0)), amount: info.1}, used_feerate, info.2, height)); }
+ }
+ }
+ assert!(predicted_weight >= spend_tx.get_weight());
+ spendable_outputs.push(SpendableOutputDescriptor::StaticOutput {
+ outpoint: BitcoinOutPoint { txid: spend_tx.txid(), vout: 0 },
+ output: spend_tx.output[0].clone(),
+ });
+ txn_to_broadcast.push(spend_tx);
+ }
+ }
+ } else if let Some((ref to_remote_rescue, ref local_key)) = self.to_remote_rescue {
+ for (idx, outp) in tx.output.iter().enumerate() {
+ if to_remote_rescue == &outp.script_pubkey {
+ spendable_outputs.push(SpendableOutputDescriptor::DynamicOutputP2WPKH {
+ outpoint: BitcoinOutPoint { txid: commitment_txid, vout: idx as u32 },
+ key: local_key.clone(),
+ output: outp.clone(),
+ });
+ }
+ }
+ }
+
+ (txn_to_broadcast, (commitment_txid, watch_outputs), spendable_outputs)
+ }
+
+ /// Attempts to claim a remote HTLC-Success/HTLC-Timeout's outputs using the revocation key
+ fn check_spend_remote_htlc(&mut self, tx: &Transaction, commitment_number: u64, height: u32, fee_estimator: &FeeEstimator) -> (Option<Transaction>, Option<SpendableOutputDescriptor>) {
+ if tx.input.len() != 1 || tx.output.len() != 1 {
+ return (None, None)
+ }
+
+ macro_rules! ignore_error {
+ ( $thing : expr ) => {
+ match $thing {
+ Ok(a) => a,
+ Err(_) => return (None, None)
+ }
+ };
+ }
+
+ let secret = if let Some(secret) = self.get_secret(commitment_number) { secret } else { return (None, None); };
+ let per_commitment_key = ignore_error!(SecretKey::from_slice(&secret));
+ let per_commitment_point = PublicKey::from_secret_key(&self.secp_ctx, &per_commitment_key);
+ let revocation_pubkey = match self.key_storage {
+ Storage::Local { ref revocation_base_key, .. } => {
+ ignore_error!(chan_utils::derive_public_revocation_key(&self.secp_ctx, &per_commitment_point, &PublicKey::from_secret_key(&self.secp_ctx, &revocation_base_key)))
+ },
+ Storage::Watchtower { ref revocation_base_key, .. } => {
+ ignore_error!(chan_utils::derive_public_revocation_key(&self.secp_ctx, &per_commitment_point, &revocation_base_key))
+ },
+ };
+ let delayed_key = match self.their_delayed_payment_base_key {
+ None => return (None, None),
+ Some(their_delayed_payment_base_key) => ignore_error!(chan_utils::derive_public_key(&self.secp_ctx, &per_commitment_point, &their_delayed_payment_base_key)),
+ };
+ let redeemscript = chan_utils::get_revokeable_redeemscript(&revocation_pubkey, self.our_to_self_delay, &delayed_key);
+ let revokeable_p2wsh = redeemscript.to_v0_p2wsh();
+ let htlc_txid = tx.txid(); //TODO: This is gonna be a performance bottleneck for watchtowers!
+
+ let mut inputs = Vec::new();
+ let mut amount = 0;
+
+ if tx.output[0].script_pubkey == revokeable_p2wsh { //HTLC transactions have one txin, one txout
+ inputs.push(TxIn {
+ previous_output: BitcoinOutPoint {
+ txid: htlc_txid,
+ vout: 0,
+ },
+ script_sig: Script::new(),
+ sequence: 0xfffffffd,
+ witness: Vec::new(),
+ });
+ amount = tx.output[0].value;
+ }
+
+ if !inputs.is_empty() {
+ let outputs = vec!(TxOut {
+ script_pubkey: self.destination_script.clone(),
+ value: amount
+ });
+
+ let mut spend_tx = Transaction {
+ version: 2,
+ lock_time: 0,
+ input: inputs,
+ output: outputs,
+ };
+ let predicted_weight = spend_tx.get_weight() + Self::get_witnesses_weight(&[InputDescriptors::RevokedOutput]);
+ let mut used_feerate;
+ if !subtract_high_prio_fee!(self, fee_estimator, spend_tx.output[0].value, predicted_weight, tx.txid(), used_feerate) {
+ return (None, None);
+ }
+
+ let sighash_parts = bip143::SighashComponents::new(&spend_tx);
+
+ let (sig, revocation_key) = match self.key_storage {
+ Storage::Local { ref revocation_base_key, .. } => {
+ let sighash = hash_to_message!(&sighash_parts.sighash_all(&spend_tx.input[0], &redeemscript, amount)[..]);
+ let revocation_key = ignore_error!(chan_utils::derive_private_revocation_key(&self.secp_ctx, &per_commitment_key, &revocation_base_key));
+ (self.secp_ctx.sign(&sighash, &revocation_key), revocation_key)
+ }
+ Storage::Watchtower { .. } => {
+ unimplemented!();
+ }
+ };
+ spend_tx.input[0].witness.push(sig.serialize_der().to_vec());
+ spend_tx.input[0].witness[0].push(SigHashType::All as u8);
+ spend_tx.input[0].witness.push(vec!(1));
+ spend_tx.input[0].witness.push(redeemscript.clone().into_bytes());
+
+ assert!(predicted_weight >= spend_tx.get_weight());
+ let outpoint = BitcoinOutPoint { txid: spend_tx.txid(), vout: 0 };
+ let output = spend_tx.output[0].clone();
+ let height_timer = Self::get_height_timer(height, self.their_to_self_delay.unwrap() as u32); // We can safely unwrap given we are past channel opening
+ match self.our_claim_txn_waiting_first_conf.entry(spend_tx.input[0].previous_output.clone()) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert((height_timer, TxMaterial::Revoked { script: redeemscript, pubkey: None, key: revocation_key, is_htlc: false, amount: tx.output[0].value }, used_feerate, height + self.our_to_self_delay as u32, height)); }
+ }
+ (Some(spend_tx), Some(SpendableOutputDescriptor::StaticOutput { outpoint, output }))
+ } else { (None, None) }
+ }
+
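+ /// Builds the claim transactions, spendable output descriptors, outputs to watch and
+ /// pending claim entries for a broadcast (or about-to-be-broadcast) local commitment tx.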
+ fn broadcast_by_local_state(&self, local_tx: &LocalSignedTx, per_commitment_point: &Option<PublicKey>, delayed_payment_base_key: &Option<SecretKey>, height: u32) -> (Vec<Transaction>, Vec<SpendableOutputDescriptor>, Vec<TxOut>, Vec<(BitcoinOutPoint, (u32, TxMaterial, u64, u32, u32))>) {
+ let mut res = Vec::with_capacity(local_tx.htlc_outputs.len());
+ let mut spendable_outputs = Vec::with_capacity(local_tx.htlc_outputs.len());
+ let mut watch_outputs = Vec::with_capacity(local_tx.htlc_outputs.len());
+ let mut pending_claims = Vec::with_capacity(local_tx.htlc_outputs.len());
+
+ macro_rules! add_dynamic_output {
+ ($father_tx: expr, $vout: expr) => {
+ if let Some(ref per_commitment_point) = *per_commitment_point {
+ if let Some(ref delayed_payment_base_key) = *delayed_payment_base_key {
+ if let Ok(local_delayedkey) = chan_utils::derive_private_key(&self.secp_ctx, per_commitment_point, delayed_payment_base_key) {
+ spendable_outputs.push(SpendableOutputDescriptor::DynamicOutputP2WSH {
+ outpoint: BitcoinOutPoint { txid: $father_tx.txid(), vout: $vout },
+ key: local_delayedkey,
+ witness_script: chan_utils::get_revokeable_redeemscript(&local_tx.revocation_key, self.our_to_self_delay, &local_tx.delayed_payment_key),
+ to_self_delay: self.our_to_self_delay,
+ output: $father_tx.output[$vout as usize].clone(),
+ });
+ }
+ }
+ }
+ }
+ }
+
+ let redeemscript = chan_utils::get_revokeable_redeemscript(&local_tx.revocation_key, self.their_to_self_delay.unwrap(), &local_tx.delayed_payment_key);
+ let revokeable_p2wsh = redeemscript.to_v0_p2wsh();
+ for (idx, output) in local_tx.tx.output.iter().enumerate() {
+ if output.script_pubkey == revokeable_p2wsh {
+ add_dynamic_output!(local_tx.tx, idx as u32);
+ break;
+ }
+ }
+
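+ // Non-dust HTLC outputs on our own commitment tx can only be claimed via the pre-signed
+ // second-stage HTLC-Timeout/HTLC-Success transactions, as spending them requires both
+ // parties' signatures.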
+ for &(ref htlc, ref sigs, _) in local_tx.htlc_outputs.iter() {
+ if let Some(transaction_output_index) = htlc.transaction_output_index {
+ if let &Some((ref their_sig, ref our_sig)) = sigs {
+ if htlc.offered {
+ log_trace!(self, "Broadcasting HTLC-Timeout transaction against local commitment transactions");
+ let mut htlc_timeout_tx = chan_utils::build_htlc_transaction(&local_tx.txid, local_tx.feerate_per_kw, self.their_to_self_delay.unwrap(), htlc, &local_tx.delayed_payment_key, &local_tx.revocation_key);
+
+ htlc_timeout_tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
+
+ htlc_timeout_tx.input[0].witness.push(their_sig.serialize_der().to_vec());
+ htlc_timeout_tx.input[0].witness[1].push(SigHashType::All as u8);
+ htlc_timeout_tx.input[0].witness.push(our_sig.serialize_der().to_vec());
+ htlc_timeout_tx.input[0].witness[2].push(SigHashType::All as u8);
+
+ htlc_timeout_tx.input[0].witness.push(Vec::new());
+ let htlc_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(htlc, &local_tx.a_htlc_key, &local_tx.b_htlc_key, &local_tx.revocation_key);
+ htlc_timeout_tx.input[0].witness.push(htlc_script.clone().into_bytes());
+
+ add_dynamic_output!(htlc_timeout_tx, 0);
+ let height_timer = Self::get_height_timer(height, htlc.cltv_expiry);
+ pending_claims.push((htlc_timeout_tx.input[0].previous_output.clone(), (height_timer, TxMaterial::LocalHTLC { script: htlc_script, sigs: (*their_sig, *our_sig), preimage: None, amount: htlc.amount_msat / 1000}, 0, htlc.cltv_expiry, height)));
+ res.push(htlc_timeout_tx);
+ } else {
+ if let Some(payment_preimage) = self.payment_preimages.get(&htlc.payment_hash) {
+ log_trace!(self, "Broadcasting HTLC-Success transaction against local commitment transactions");
+ let mut htlc_success_tx = chan_utils::build_htlc_transaction(&local_tx.txid, local_tx.feerate_per_kw, self.their_to_self_delay.unwrap(), htlc, &local_tx.delayed_payment_key, &local_tx.revocation_key);
+
+ htlc_success_tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
+
+ htlc_success_tx.input[0].witness.push(their_sig.serialize_der().to_vec());
+ htlc_success_tx.input[0].witness[1].push(SigHashType::All as u8);
+ htlc_success_tx.input[0].witness.push(our_sig.serialize_der().to_vec());
+ htlc_success_tx.input[0].witness[2].push(SigHashType::All as u8);
+
+ htlc_success_tx.input[0].witness.push(payment_preimage.0.to_vec());
+ let htlc_script = chan_utils::get_htlc_redeemscript_with_explicit_keys(htlc, &local_tx.a_htlc_key, &local_tx.b_htlc_key, &local_tx.revocation_key);
+ htlc_success_tx.input[0].witness.push(htlc_script.clone().into_bytes());
+
+ add_dynamic_output!(htlc_success_tx, 0);
+ let height_timer = Self::get_height_timer(height, htlc.cltv_expiry);
+ pending_claims.push((htlc_success_tx.input[0].previous_output.clone(), (height_timer, TxMaterial::LocalHTLC { script: htlc_script, sigs: (*their_sig, *our_sig), preimage: Some(*payment_preimage), amount: htlc.amount_msat / 1000}, 0, htlc.cltv_expiry, height)));
+ res.push(htlc_success_tx);
+ }
+ }
+ watch_outputs.push(local_tx.tx.output[transaction_output_index as usize].clone());
+ } else { panic!("Should have sigs for non-dust local tx outputs!") }
+ }
+ }
+
+ (res, spendable_outputs, watch_outputs, pending_claims)
+ }
+
+ /// Attempts to claim any claimable HTLCs in a commitment transaction which was not (yet)
+ /// revoked using data in local_claimable_outpoints.
+ /// Should not be used if check_spend_remote_transaction succeeds.
+ fn check_spend_local_transaction(&mut self, tx: &Transaction, height: u32) -> (Vec<Transaction>, Vec<SpendableOutputDescriptor>, (Sha256dHash, Vec<TxOut>)) {
+ let commitment_txid = tx.txid();
+ let mut local_txn = Vec::new();
+ let mut spendable_outputs = Vec::new();
+ let mut watch_outputs = Vec::new();
+
+ macro_rules! wait_threshold_conf {
+ ($height: expr, $source: expr, $commitment_tx: expr, $payment_hash: expr) => {
+ log_trace!(self, "Failing HTLC with payment_hash {} from {} local commitment tx due to broadcast of transaction, waiting confirmation (at height{})", log_bytes!($payment_hash.0), $commitment_tx, height + ANTI_REORG_DELAY - 1);
+ match self.onchain_events_waiting_threshold_conf.entry($height + ANTI_REORG_DELAY - 1) {
+ hash_map::Entry::Occupied(mut entry) => {
+ let e = entry.get_mut();
+ e.retain(|ref event| {
+ match **event {
+ OnchainEvent::HTLCUpdate { ref htlc_update } => {
+ return htlc_update.0 != $source
+ },
+ _ => return true
+ }
+ });
+ e.push(OnchainEvent::HTLCUpdate { htlc_update: ($source, $payment_hash)});
+ }
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: ($source, $payment_hash)}]);
+ }
+ }
+ }
+ }
+
+ macro_rules! append_onchain_update {
+ ($updates: expr) => {
+ local_txn.append(&mut $updates.0);
+ spendable_outputs.append(&mut $updates.1);
+ watch_outputs.append(&mut $updates.2);
+ for claim in $updates.3 {
+ match self.our_claim_txn_waiting_first_conf.entry(claim.0) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert(claim.1); }
+ }
+ }
+ }
+ }
+
+ // HTLC sets may differ between the latest and previous local commitment txn; in case one of them hits the chain, ensure we fail all HTLCs backwards
+ let mut is_local_tx = false;
+
+ if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
+ if local_tx.txid == commitment_txid {
+ is_local_tx = true;
+ log_trace!(self, "Got latest local commitment tx broadcast, searching for available HTLCs to claim");
+ match self.key_storage {
+ Storage::Local { ref delayed_payment_base_key, ref latest_per_commitment_point, .. } => {
+ append_onchain_update!(self.broadcast_by_local_state(local_tx, latest_per_commitment_point, &Some(*delayed_payment_base_key), height));
+ },
+ Storage::Watchtower { .. } => {
+ append_onchain_update!(self.broadcast_by_local_state(local_tx, &None, &None, height));
+ }
+ }
+ }
+ }
+ if let &Some(ref local_tx) = &self.prev_local_signed_commitment_tx {
+ if local_tx.txid == commitment_txid {
+ is_local_tx = true;
+ log_trace!(self, "Got previous local commitment tx broadcast, searching for available HTLCs to claim");
+ match self.key_storage {
+ Storage::Local { ref delayed_payment_base_key, ref prev_latest_per_commitment_point, .. } => {
+ append_onchain_update!(self.broadcast_by_local_state(local_tx, prev_latest_per_commitment_point, &Some(*delayed_payment_base_key), height));
+ },
+ Storage::Watchtower { .. } => {
+ append_onchain_update!(self.broadcast_by_local_state(local_tx, &None, &None, height));
+ }
+ }
+ }
+ }
+
+ macro_rules! fail_dust_htlcs_after_threshold_conf {
+ ($local_tx: expr) => {
+ for &(ref htlc, _, ref source) in &$local_tx.htlc_outputs {
+ if htlc.transaction_output_index.is_none() {
+ if let &Some(ref source) = source {
+ wait_threshold_conf!(height, source.clone(), "lastest", htlc.payment_hash.clone());
+ }
+ }
+ }
+ }
+ }
+
+ if is_local_tx {
+ if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
+ fail_dust_htlcs_after_threshold_conf!(local_tx);
+ }
+ if let &Some(ref local_tx) = &self.prev_local_signed_commitment_tx {
+ fail_dust_htlcs_after_threshold_conf!(local_tx);
+ }
+ }
+
+ (local_txn, spendable_outputs, (commitment_txid, watch_outputs))
+ }
+
+ /// Generates a spendable output event when a closing transaction gets registered on-chain.
+ fn check_spend_closing_transaction(&self, tx: &Transaction) -> Option<SpendableOutputDescriptor> {
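+ // A cooperative close spends the funding output with the maximal sequence number and a
+ // witness whose final element is the 71-byte 2-of-2 multisig witness script, which is the
+ // pattern we match on here.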
+ if tx.input[0].sequence == 0xFFFFFFFF && !tx.input[0].witness.is_empty() && tx.input[0].witness.last().unwrap().len() == 71 {
+ match self.key_storage {
+ Storage::Local { ref shutdown_pubkey, .. } => {
+ let our_channel_close_key_hash = Hash160::hash(&shutdown_pubkey.serialize());
+ let shutdown_script = Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(&our_channel_close_key_hash[..]).into_script();
+ for (idx, output) in tx.output.iter().enumerate() {
+ if shutdown_script == output.script_pubkey {
+ return Some(SpendableOutputDescriptor::StaticOutput {
+ outpoint: BitcoinOutPoint { txid: tx.txid(), vout: idx as u32 },
+ output: output.clone(),
+ });
+ }
+ }
+ }
+ Storage::Watchtower { .. } => {
+ //TODO: we need to ensure an offline client will generate the event when it
+ // comes back online after only the watchtower saw the transaction
+ }
+ }
+ }
+ None
+ }
+
+ /// Used by ChannelManager deserialization to broadcast the latest local state if its copy of
+ /// the Channel was out-of-date. You may also use it to get a broadcastable local toxic tx when
+ /// we have fallen behind, i.e. when we receive a channel_reestablish proving that our remote
+ /// side knows a higher revocation secret than the local commitment number we are aware of.
+ /// Broadcasting these transactions is UNSAFE, as they allow the remote side to punish you.
+ /// Nevertheless, you may want to broadcast them if the remote side doesn't close the channel
+ /// with its own higher commitment transaction after a substantial amount of time (a month or
+ /// even a year), in order to get your funds back. It may be best to contact the other node's
+ /// operator out-of-band and coordinate with them, if that option is available to you. In any
+ /// case, the choice is up to the user.
+ pub fn get_latest_local_commitment_txn(&self) -> Vec<Transaction> {
+ if let &Some(ref local_tx) = &self.current_local_signed_commitment_tx {
+ let mut res = vec![local_tx.tx.clone()];
+ match self.key_storage {
+ Storage::Local { ref delayed_payment_base_key, ref prev_latest_per_commitment_point, .. } => {
+ res.append(&mut self.broadcast_by_local_state(local_tx, prev_latest_per_commitment_point, &Some(*delayed_payment_base_key), 0).0);
+ // We throw away the generated waiting_first_conf data as we aren't (yet) confirmed and we don't actually know what the caller wants to do.
+ // The data will be re-generated and tracked in check_spend_local_transaction if we get a confirmation.
+ },
+ _ => panic!("Can only broadcast by local channelmonitor"),
+ };
+ res
+ } else {
+ Vec::new()
+ }
+ }
+
+ fn block_connected(&mut self, txn_matched: &[&Transaction], height: u32, block_hash: &Sha256dHash, broadcaster: &BroadcasterInterface, fee_estimator: &FeeEstimator)-> (Vec<(Sha256dHash, Vec<TxOut>)>, Vec<SpendableOutputDescriptor>, Vec<(HTLCSource, Option<PaymentPreimage>, PaymentHash)>) {
+ let mut watch_outputs = Vec::new();
+ let mut spendable_outputs = Vec::new();
+ let mut htlc_updated = Vec::new();
+ for tx in txn_matched {
+ if tx.input.len() == 1 {
+ // Assuming our keys were not leaked (in which case we're screwed no matter what),
+ // commitment transactions and HTLC transactions will all only ever have one input,
+ // which is an easy way to filter out any potential non-matching txn for lazy
+ // filters.
+ let prevout = &tx.input[0].previous_output;
+ let mut txn: Vec<Transaction> = Vec::new();
+ let funding_txo = match self.key_storage {
+ Storage::Local { ref funding_info, .. } => {
+ funding_info.clone()
+ }
+ Storage::Watchtower { .. } => {
+ unimplemented!();
+ }
+ };
+ if funding_txo.is_none() || (prevout.txid == funding_txo.as_ref().unwrap().0.txid && prevout.vout == funding_txo.as_ref().unwrap().0.index as u32) {
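+ // Per BOLT 3, commitment transactions encode the (obscured) commitment number in the
+ // upper bits of their sequence (0x80......) and locktime (0x20......) fields, giving us
+ // a cheap way to distinguish them from other spends of the funding output.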
+ if (tx.input[0].sequence >> 8*3) as u8 == 0x80 && (tx.lock_time >> 8*3) as u8 == 0x20 {
+ let (remote_txn, new_outputs, mut spendable_output) = self.check_spend_remote_transaction(tx, height, fee_estimator);
+ txn = remote_txn;
+ spendable_outputs.append(&mut spendable_output);
+ if !new_outputs.1.is_empty() {
+ watch_outputs.push(new_outputs);
+ }
+ if txn.is_empty() {
+ let (local_txn, mut spendable_output, new_outputs) = self.check_spend_local_transaction(tx, height);
+ spendable_outputs.append(&mut spendable_output);
+ txn = local_txn;
+ if !new_outputs.1.is_empty() {
+ watch_outputs.push(new_outputs);
+ }
+ }
+ }
+ if !funding_txo.is_none() && txn.is_empty() {
+ if let Some(spendable_output) = self.check_spend_closing_transaction(tx) {
+ spendable_outputs.push(spendable_output);
+ }
+ }
+ } else {
+ if let Some(&(commitment_number, _)) = self.remote_commitment_txn_on_chain.get(&prevout.txid) {
+ let (tx, spendable_output) = self.check_spend_remote_htlc(tx, commitment_number, height, fee_estimator);
+ if let Some(tx) = tx {
+ txn.push(tx);
+ }
+ if let Some(spendable_output) = spendable_output {
+ spendable_outputs.push(spendable_output);
+ }
+ }
+ }
+ for tx in txn.iter() {
+ broadcaster.broadcast_transaction(tx);
+ }
+ }
+ // While all commitment/HTLC-Success/HTLC-Timeout transactions have one input, HTLCs
+ // can also be resolved in a few other ways which can have more than one output. Thus,
+ // we call is_resolving_htlc_output here outside of the tx.input.len() == 1 check.
+ let mut updated = self.is_resolving_htlc_output(tx, height);
+ if updated.len() > 0 {
+ htlc_updated.append(&mut updated);
+ }
+ for inp in &tx.input {
+ if self.our_claim_txn_waiting_first_conf.contains_key(&inp.previous_output) {
+ match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+ hash_map::Entry::Occupied(mut entry) => {
+ let e = entry.get_mut();
+ e.retain(|ref event| {
+ match **event {
+ OnchainEvent::Claim { outpoint } => {
+ return outpoint != inp.previous_output
+ },
+ _ => return true
+ }
+ });
+ e.push(OnchainEvent::Claim { outpoint: inp.previous_output.clone()});
+ }
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(vec![OnchainEvent::Claim { outpoint: inp.previous_output.clone()}]);
+ }
+ }
+ }
+ }
+ }
+ let mut pending_claims = Vec::new();
+ if let Some(ref cur_local_tx) = self.current_local_signed_commitment_tx {
+ if self.would_broadcast_at_height(height) {
+ broadcaster.broadcast_transaction(&cur_local_tx.tx);
+ match self.key_storage {
+ Storage::Local { ref delayed_payment_base_key, ref latest_per_commitment_point, .. } => {
+ let (txs, mut spendable_output, new_outputs, mut pending_txn) = self.broadcast_by_local_state(&cur_local_tx, latest_per_commitment_point, &Some(*delayed_payment_base_key), height);
+ spendable_outputs.append(&mut spendable_output);
+ pending_claims.append(&mut pending_txn);
+ if !new_outputs.is_empty() {
+ watch_outputs.push((cur_local_tx.txid.clone(), new_outputs));
+ }
+ for tx in txs {
+ broadcaster.broadcast_transaction(&tx);
+ }
+ },
+ Storage::Watchtower { .. } => {
+ let (txs, mut spendable_output, new_outputs, mut pending_txn) = self.broadcast_by_local_state(&cur_local_tx, &None, &None, height);
+ spendable_outputs.append(&mut spendable_output);
+ pending_claims.append(&mut pending_txn);
+ if !new_outputs.is_empty() {
+ watch_outputs.push((cur_local_tx.txid.clone(), new_outputs));
+ }
+ for tx in txs {
+ broadcaster.broadcast_transaction(&tx);
+ }
+ }
+ }
+ }
+ }
+ for claim in pending_claims {
+ match self.our_claim_txn_waiting_first_conf.entry(claim.0) {
+ hash_map::Entry::Occupied(_) => {},
+ hash_map::Entry::Vacant(entry) => { entry.insert(claim.1); }
+ }
+ }
+ if let Some(events) = self.onchain_events_waiting_threshold_conf.remove(&height) {
+ for ev in events {
+ match ev {
+ OnchainEvent::Claim { outpoint } => {
+ self.our_claim_txn_waiting_first_conf.remove(&outpoint);
+ },
+ OnchainEvent::HTLCUpdate { htlc_update } => {
+ log_trace!(self, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!((htlc_update.1).0));
+ htlc_updated.push((htlc_update.0, None, htlc_update.1));
+ },
+ }
+ }
+ }
+ //TODO: iter on buffered TxMaterial in our_claim_txn_waiting_first_conf, if block timer is expired generate a bumped claim tx (RBF or CPFP accordingly)
+ self.last_block_hash = block_hash.clone();
+ (watch_outputs, spendable_outputs, htlc_updated)
+ }
+
+ fn block_disconnected(&mut self, height: u32, block_hash: &Sha256dHash) {
+ if let Some(_) = self.onchain_events_waiting_threshold_conf.remove(&(height + ANTI_REORG_DELAY - 1)) {
+ //We may discard:
+ //- any htlc update whose failure-trigger tx (revoked commitment tx, non-revoked commitment tx, HTLC-timeout tx) has been disconnected
+ //- our claim txs on commitment tx outputs
+ }
+ self.our_claim_txn_waiting_first_conf.retain(|_, v| v.3 != height);
+ self.last_block_hash = block_hash.clone();
+ }
+
+ pub(super) fn would_broadcast_at_height(&self, height: u32) -> bool {
+ // We need to consider all HTLCs which are:
+ // * in any unrevoked remote commitment transaction, as they could broadcast said
+ // transactions and we'd end up in a race, or
+ // * are in our latest local commitment transaction, as this is the thing we will
+ // broadcast if we go on-chain.
+ // Note that we consider HTLCs which were below dust threshold here - while they don't
+ // strictly imply that we need to fail the channel, we need to go ahead and fail them back
+ // to the source, and if we don't fail the channel we will have to ensure that the next
+ // updates that peer sends us are update_fails, failing the channel if not. It's probably
+ // easier to just fail the channel as this case should be rare enough anyway.
+ macro_rules! scan_commitment {
+ ($htlcs: expr, $local_tx: expr) => {
+ for ref htlc in $htlcs {
+ // For inbound HTLCs which we know the preimage for, we have to ensure we hit the
+ // chain with enough room to claim the HTLC without our counterparty being able to
+ // time out the HTLC first.
+ // For outbound HTLCs which our counterparty hasn't failed/claimed, our primary
+ // concern is being able to claim the corresponding inbound HTLC (on another
+ // channel) before it expires. In fact, we don't even really care if our
+ // counterparty here claims such an outbound HTLC after it expired as long as we
+ // can still claim the corresponding HTLC. Thus, to avoid needlessly hitting the
+ // chain when our counterparty is waiting for expiration to off-chain fail an HTLC
+ // we give ourselves a few blocks of headroom after expiration before going
+ // on-chain for an expired HTLC.
+ // Note that, to avoid a potential attack whereby a node delays claiming an HTLC
+ // from us until we've reached the point where we go on-chain with the
+ // corresponding inbound HTLC, we must ensure that outbound HTLCs go on chain at
+ // least CLTV_CLAIM_BUFFER blocks prior to the inbound HTLC.
+ // aka outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS == height - CLTV_CLAIM_BUFFER
+ // inbound_cltv == height + CLTV_CLAIM_BUFFER
+ // outbound_cltv + LATENCY_GRACE_PERIOD_BLOCKS + CLTV_CLAIM_BUFFER <= inbound_cltv - CLTV_CLAIM_BUFFER
+ // LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= inbound_cltv - outbound_cltv
+ // CLTV_EXPIRY_DELTA <= inbound_cltv - outbound_cltv (by check in ChannelManager::decode_update_add_htlc_onion)
+ // LATENCY_GRACE_PERIOD_BLOCKS + 2*CLTV_CLAIM_BUFFER <= CLTV_EXPIRY_DELTA
+ // The final, above, condition is checked for statically in channelmanager
+ // with CHECK_CLTV_EXPIRY_SANITY_2.
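+ // For example, if CLTV_CLAIM_BUFFER were 6 and LATENCY_GRACE_PERIOD_BLOCKS were 3,
+ // any CLTV_EXPIRY_DELTA of at least 3 + 2*6 = 15 blocks would satisfy this.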
+ let htlc_outbound = $local_tx == htlc.offered;
+ if ( htlc_outbound && htlc.cltv_expiry + LATENCY_GRACE_PERIOD_BLOCKS <= height) ||
+ (!htlc_outbound && htlc.cltv_expiry <= height + CLTV_CLAIM_BUFFER && self.payment_preimages.contains_key(&htlc.payment_hash)) {
+ log_info!(self, "Force-closing channel due to {} HTLC timeout, HTLC expiry is {}", if htlc_outbound { "outbound" } else { "inbound "}, htlc.cltv_expiry);
+ return true;
+ }
+ }
+ }
+ }
+
+ if let Some(ref cur_local_tx) = self.current_local_signed_commitment_tx {
+ scan_commitment!(cur_local_tx.htlc_outputs.iter().map(|&(ref a, _, _)| a), true);
+ }
+
+ if let Storage::Local { ref current_remote_commitment_txid, ref prev_remote_commitment_txid, .. } = self.key_storage {
+ if let &Some(ref txid) = current_remote_commitment_txid {
+ if let Some(ref htlc_outputs) = self.remote_claimable_outpoints.get(txid) {
+ scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
+ }
+ }
+ if let &Some(ref txid) = prev_remote_commitment_txid {
+ if let Some(ref htlc_outputs) = self.remote_claimable_outpoints.get(txid) {
+ scan_commitment!(htlc_outputs.iter().map(|&(ref a, _)| a), false);
+ }
+ }
+ }
+
+ false
+ }
+
+ /// Checks if any broadcast transaction resolves an HTLC output by a success or timeout on a
+ /// local or remote commitment tx; if so, returns the HTLC source, the preimage if found, and
+ /// the payment_hash of the resolved HTLC
+ fn is_resolving_htlc_output(&mut self, tx: &Transaction, height: u32) -> Vec<(HTLCSource, Option<PaymentPreimage>, PaymentHash)> {
+ let mut htlc_updated = Vec::new();
+
+ 'outer_loop: for input in &tx.input {
+ let mut payment_data = None;
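+ // Classify the spend by its witness shape: a revocation claim has 3 elements with a
+ // 33-byte revocation pubkey second (a payment preimage would be 32 bytes), an
+ // HTLC-Success spend has 5 elements (multisig dummy, two sigs, preimage, script), and a
+ // remote preimage claim of one of our offered HTLCs has 3 elements (sig, preimage, script).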
+ let revocation_sig_claim = (input.witness.len() == 3 && input.witness[2].len() == OFFERED_HTLC_SCRIPT_WEIGHT && input.witness[1].len() == 33)
+ || (input.witness.len() == 3 && input.witness[2].len() == ACCEPTED_HTLC_SCRIPT_WEIGHT && input.witness[1].len() == 33);
+ let accepted_preimage_claim = input.witness.len() == 5 && input.witness[4].len() == ACCEPTED_HTLC_SCRIPT_WEIGHT;
+ let offered_preimage_claim = input.witness.len() == 3 && input.witness[2].len() == OFFERED_HTLC_SCRIPT_WEIGHT;
+
+ macro_rules! log_claim {
+ ($tx_info: expr, $local_tx: expr, $htlc: expr, $source_avail: expr) => {
+ // We found the output in question, but aren't failing it backwards
+ // as we have no corresponding source and no valid remote commitment txid
+ // to try a weak source binding with same-hash, same-value still-valid offered HTLC.
+ // This implies either it is an inbound HTLC or an outbound HTLC on a revoked transaction.
+ let outbound_htlc = $local_tx == $htlc.offered;
+ if ($local_tx && revocation_sig_claim) ||
+ (outbound_htlc && !$source_avail && (accepted_preimage_claim || offered_preimage_claim)) {
+ log_error!(self, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}!",
+ $tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(),
+ if outbound_htlc { "outbound" } else { "inbound" }, log_bytes!($htlc.payment_hash.0),
+ if revocation_sig_claim { "revocation sig" } else { "preimage claim after we'd passed the HTLC resolution back" });
+ } else {
+ log_info!(self, "Input spending {} ({}:{}) in {} resolves {} HTLC with payment hash {} with {}",
+ $tx_info, input.previous_output.txid, input.previous_output.vout, tx.txid(),
+ if outbound_htlc { "outbound" } else { "inbound" }, log_bytes!($htlc.payment_hash.0),
+ if revocation_sig_claim { "revocation sig" } else if accepted_preimage_claim || offered_preimage_claim { "preimage" } else { "timeout" });
+ }
+ }
+ }
+
+ macro_rules! check_htlc_valid_remote {
+ ($remote_txid: expr, $htlc_output: expr) => {
+ if let &Some(txid) = $remote_txid {
+ for &(ref pending_htlc, ref pending_source) in self.remote_claimable_outpoints.get(&txid).unwrap() {
+ if pending_htlc.payment_hash == $htlc_output.payment_hash && pending_htlc.amount_msat == $htlc_output.amount_msat {
+ if let &Some(ref source) = pending_source {
+ log_claim!("revoked remote commitment tx", false, pending_htlc, true);
+ payment_data = Some(((**source).clone(), $htlc_output.payment_hash));
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ macro_rules! scan_commitment {
+ ($htlcs: expr, $tx_info: expr, $local_tx: expr) => {
+ for (ref htlc_output, source_option) in $htlcs {
+ if Some(input.previous_output.vout) == htlc_output.transaction_output_index {
+ if let Some(ref source) = source_option {
+ log_claim!($tx_info, $local_tx, htlc_output, true);
+ // We have a resolution of an HTLC either from one of our latest
+ // local commitment transactions or an unrevoked remote commitment
+ // transaction. This implies we either learned a preimage, the HTLC
+ // has timed out, or we screwed up. In any case, we should now
+ // resolve the source HTLC with the original sender.
+ payment_data = Some(((*source).clone(), htlc_output.payment_hash));
+ } else if !$local_tx {
+ if let Storage::Local { ref current_remote_commitment_txid, .. } = self.key_storage {
+ check_htlc_valid_remote!(current_remote_commitment_txid, htlc_output);
+ }
+ if payment_data.is_none() {
+ if let Storage::Local { ref prev_remote_commitment_txid, .. } = self.key_storage {
+ check_htlc_valid_remote!(prev_remote_commitment_txid, htlc_output);
+ }
+ }
+ }
+ if payment_data.is_none() {
+ log_claim!($tx_info, $local_tx, htlc_output, false);
+ continue 'outer_loop;
+ }
+ }
+ }
+ }
+ }
+
+ if let Some(ref current_local_signed_commitment_tx) = self.current_local_signed_commitment_tx {
+ if input.previous_output.txid == current_local_signed_commitment_tx.txid {
+ scan_commitment!(current_local_signed_commitment_tx.htlc_outputs.iter().map(|&(ref a, _, ref b)| (a, b.as_ref())),
+ "our latest local commitment tx", true);
+ }
+ }
+ if let Some(ref prev_local_signed_commitment_tx) = self.prev_local_signed_commitment_tx {
+ if input.previous_output.txid == prev_local_signed_commitment_tx.txid {
+ scan_commitment!(prev_local_signed_commitment_tx.htlc_outputs.iter().map(|&(ref a, _, ref b)| (a, b.as_ref())),
+ "our previous local commitment tx", true);
+ }
+ }
+ if let Some(ref htlc_outputs) = self.remote_claimable_outpoints.get(&input.previous_output.txid) {
+ scan_commitment!(htlc_outputs.iter().map(|&(ref a, ref b)| (a, (b.as_ref().clone()).map(|boxed| &**boxed))),
+ "remote commitment tx", false);
+ }
+
+ // Check that scan_commitment, above, decided there is some source worth relaying an
+ // HTLC resolution backwards to and figure out whether we learned a preimage from it.
+ if let Some((source, payment_hash)) = payment_data {
+ let mut payment_preimage = PaymentPreimage([0; 32]);
+ if accepted_preimage_claim {
+ payment_preimage.0.copy_from_slice(&input.witness[3]);
+ htlc_updated.push((source, Some(payment_preimage), payment_hash));
+ } else if offered_preimage_claim {
+ payment_preimage.0.copy_from_slice(&input.witness[1]);
+ htlc_updated.push((source, Some(payment_preimage), payment_hash));
+ } else {
+ log_info!(self, "Failing HTLC with payment_hash {} timeout by a spend tx, waiting for confirmation (at height{})", log_bytes!(payment_hash.0), height + ANTI_REORG_DELAY - 1);
+ match self.onchain_events_waiting_threshold_conf.entry(height + ANTI_REORG_DELAY - 1) {
+ hash_map::Entry::Occupied(mut entry) => {
+ let e = entry.get_mut();
+ e.retain(|ref event| {
+ match **event {
+ OnchainEvent::HTLCUpdate { ref htlc_update } => {
+ return htlc_update.0 != source
+ },
+ _ => return true
+ }
+ });
+ e.push(OnchainEvent::HTLCUpdate { htlc_update: (source, payment_hash)});
+ }
+ hash_map::Entry::Vacant(entry) => {
+ entry.insert(vec![OnchainEvent::HTLCUpdate { htlc_update: (source, payment_hash)}]);
+ }
+ }
+ }
+ }
+ }
+ htlc_updated
+ }
+}
+
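+// Cap per-collection pre-allocations during deserialization so a corrupted or malicious
+// length prefix can't force a huge up-front allocation before the data is actually read.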
+const MAX_ALLOC_SIZE: usize = 64*1024;
+
+impl<R: ::std::io::Read> ReadableArgs<R, Arc<Logger>> for (Sha256dHash, ChannelMonitor) {
+ fn read(reader: &mut R, logger: Arc<Logger>) -> Result<Self, DecodeError> {
+ let secp_ctx = Secp256k1::new();
+ macro_rules! unwrap_obj {
+ ($key: expr) => {
+ match $key {
+ Ok(res) => res,
+ Err(_) => return Err(DecodeError::InvalidValue),
+ }
+ }
+ }
+
+ let _ver: u8 = Readable::read(reader)?;
+ let min_ver: u8 = Readable::read(reader)?;
+ if min_ver > SERIALIZATION_VERSION {
+ return Err(DecodeError::UnknownVersion);
+ }
+
+ let commitment_transaction_number_obscure_factor = <U48 as Readable<R>>::read(reader)?.0;
+
+ let key_storage = match <u8 as Readable<R>>::read(reader)? {
+ 0 => {
+ let revocation_base_key = Readable::read(reader)?;
+ let htlc_base_key = Readable::read(reader)?;
+ let delayed_payment_base_key = Readable::read(reader)?;
+ let payment_base_key = Readable::read(reader)?;
+ let shutdown_pubkey = Readable::read(reader)?;
+ let prev_latest_per_commitment_point = Readable::read(reader)?;
+ let latest_per_commitment_point = Readable::read(reader)?;
+ // Technically this read can fail, causing a serialization round-trip to fail, but only
+ // for barely-init'd ChannelMonitors that we can't do anything with anyway.
+ let outpoint = OutPoint {
+ txid: Readable::read(reader)?,
+ index: Readable::read(reader)?,
+ };
+ let funding_info = Some((outpoint, Readable::read(reader)?));
+ let current_remote_commitment_txid = Readable::read(reader)?;
+ let prev_remote_commitment_txid = Readable::read(reader)?;
+ Storage::Local {
+ revocation_base_key,
+ htlc_base_key,
+ delayed_payment_base_key,
+ payment_base_key,
+ shutdown_pubkey,
+ prev_latest_per_commitment_point,
+ latest_per_commitment_point,
+ funding_info,
+ current_remote_commitment_txid,
+ prev_remote_commitment_txid,
+ }
+ },
+ _ => return Err(DecodeError::InvalidValue),
+ };
+
+ let their_htlc_base_key = Some(Readable::read(reader)?);
+ let their_delayed_payment_base_key = Some(Readable::read(reader)?);
+
+ let their_cur_revocation_points = {
+ let first_idx = <U48 as Readable<R>>::read(reader)?.0;
+ if first_idx == 0 {
+ None
+ } else {
+ let first_point = Readable::read(reader)?;
+ let second_point_slice: [u8; 33] = Readable::read(reader)?;
+ if second_point_slice[0..32] == [0; 32] && second_point_slice[32] == 0 {
+ Some((first_idx, first_point, None))
+ } else {
+ Some((first_idx, first_point, Some(unwrap_obj!(PublicKey::from_slice(&second_point_slice)))))
+ }
+ }
+ };
+
+ let our_to_self_delay: u16 = Readable::read(reader)?;
+ let their_to_self_delay: Option<u16> = Some(Readable::read(reader)?);
+
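+ // Per BOLT 3's compact secret storage scheme, all of the counterparty's previous
+ // per-commitment secrets can be reconstructed from at most 49 (secret, index) buckets,
+ // which we read back here.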
+ let mut old_secrets = [([0; 32], 1 << 48); 49];
+ for &mut (ref mut secret, ref mut idx) in old_secrets.iter_mut() {
+ *secret = Readable::read(reader)?;
+ *idx = Readable::read(reader)?;
+ }
+
+ macro_rules! read_htlc_in_commitment {
+ () => {
+ {
+ let offered: bool = Readable::read(reader)?;
+ let amount_msat: u64 = Readable::read(reader)?;
+ let cltv_expiry: u32 = Readable::read(reader)?;
+ let payment_hash: PaymentHash = Readable::read(reader)?;
+ let transaction_output_index: Option<u32> = Readable::read(reader)?;
+
+ HTLCOutputInCommitment {
+ offered, amount_msat, cltv_expiry, payment_hash, transaction_output_index
+ }
+ }
+ }
+ }
+
+ let remote_claimable_outpoints_len: u64 = Readable::read(reader)?;
+ let mut remote_claimable_outpoints = HashMap::with_capacity(cmp::min(remote_claimable_outpoints_len as usize, MAX_ALLOC_SIZE / 64));
+ for _ in 0..remote_claimable_outpoints_len {
+ let txid: Sha256dHash = Readable::read(reader)?;
+ let htlcs_count: u64 = Readable::read(reader)?;
+ let mut htlcs = Vec::with_capacity(cmp::min(htlcs_count as usize, MAX_ALLOC_SIZE / 32));
+ for _ in 0..htlcs_count {
+ htlcs.push((read_htlc_in_commitment!(), <Option<HTLCSource> as Readable<R>>::read(reader)?.map(|o: HTLCSource| Box::new(o))));
+ }
+ if let Some(_) = remote_claimable_outpoints.insert(txid, htlcs) {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+
+ let remote_commitment_txn_on_chain_len: u64 = Readable::read(reader)?;
+ let mut remote_commitment_txn_on_chain = HashMap::with_capacity(cmp::min(remote_commitment_txn_on_chain_len as usize, MAX_ALLOC_SIZE / 32));
+ for _ in 0..remote_commitment_txn_on_chain_len {
+ let txid: Sha256dHash = Readable::read(reader)?;
+ let commitment_number = <U48 as Readable<R>>::read(reader)?.0;
+ let outputs_count = <u64 as Readable<R>>::read(reader)?;
+ let mut outputs = Vec::with_capacity(cmp::min(outputs_count as usize, MAX_ALLOC_SIZE / 8));
+ for _ in 0..outputs_count {
+ outputs.push(Readable::read(reader)?);
+ }
+ if let Some(_) = remote_commitment_txn_on_chain.insert(txid, (commitment_number, outputs)) {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+
+ let remote_hash_commitment_number_len: u64 = Readable::read(reader)?;
+ let mut remote_hash_commitment_number = HashMap::with_capacity(cmp::min(remote_hash_commitment_number_len as usize, MAX_ALLOC_SIZE / 32));
+ for _ in 0..remote_hash_commitment_number_len {
+ let payment_hash: PaymentHash = Readable::read(reader)?;
+ let commitment_number = <U48 as Readable<R>>::read(reader)?.0;
+ if let Some(_) = remote_hash_commitment_number.insert(payment_hash, commitment_number) {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+
+ macro_rules! read_local_tx {
+ () => {
+ {
+ let tx = match Transaction::consensus_decode(reader.by_ref()) {
+ Ok(tx) => tx,
+ Err(e) => match e {
+ encode::Error::Io(ioe) => return Err(DecodeError::Io(ioe)),
+ _ => return Err(DecodeError::InvalidValue),
+ },
+ };
+
+ if tx.input.is_empty() {
+ // Ensure tx didn't hit the 0-input ambiguity case.
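+ // (In the segwit encoding, a zero-input transaction is indistinguishable from the
+ // extended-format marker byte, so such a tx cannot round-trip unambiguously.)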
+ return Err(DecodeError::InvalidValue);
+ }
+
+ let revocation_key = Readable::read(reader)?;
+ let a_htlc_key = Readable::read(reader)?;
+ let b_htlc_key = Readable::read(reader)?;
+ let delayed_payment_key = Readable::read(reader)?;
+ let feerate_per_kw: u64 = Readable::read(reader)?;
+
+ let htlcs_len: u64 = Readable::read(reader)?;
+ let mut htlcs = Vec::with_capacity(cmp::min(htlcs_len as usize, MAX_ALLOC_SIZE / 128));
+ for _ in 0..htlcs_len {
+ let htlc = read_htlc_in_commitment!();
+ let sigs = match <u8 as Readable<R>>::read(reader)? {
+ 0 => None,
+ 1 => Some((Readable::read(reader)?, Readable::read(reader)?)),
+ _ => return Err(DecodeError::InvalidValue),
+ };
+ htlcs.push((htlc, sigs, Readable::read(reader)?));
+ }
+
+ LocalSignedTx {
+ txid: tx.txid(),
+ tx, revocation_key, a_htlc_key, b_htlc_key, delayed_payment_key, feerate_per_kw,
+ htlc_outputs: htlcs
+ }
+ }
+ }
+ }
+
+ let prev_local_signed_commitment_tx = match <u8 as Readable<R>>::read(reader)? {
+ 0 => None,
+ 1 => {
+ Some(read_local_tx!())
+ },
+ _ => return Err(DecodeError::InvalidValue),
+ };
+
+ let current_local_signed_commitment_tx = match <u8 as Readable<R>>::read(reader)? {
+ 0 => None,
+ 1 => {
+ Some(read_local_tx!())
+ },
+ _ => return Err(DecodeError::InvalidValue),
+ };
+
+ let current_remote_commitment_number = <U48 as Readable<R>>::read(reader)?.0;
+
+ let payment_preimages_len: u64 = Readable::read(reader)?;
+ let mut payment_preimages = HashMap::with_capacity(cmp::min(payment_preimages_len as usize, MAX_ALLOC_SIZE / 32));
+ for _ in 0..payment_preimages_len {
+ let preimage: PaymentPreimage = Readable::read(reader)?;
+ let hash = PaymentHash(Sha256::hash(&preimage.0[..]).into_inner());
+ if let Some(_) = payment_preimages.insert(hash, preimage) {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+
+ let last_block_hash: Sha256dHash = Readable::read(reader)?;
+ let destination_script = Readable::read(reader)?;
+ let to_remote_rescue = match <u8 as Readable<R>>::read(reader)? {
+ 0 => None,
+ 1 => {
+ let to_remote_script = Readable::read(reader)?;
+ let local_key = Readable::read(reader)?;
+ Some((to_remote_script, local_key))
+ }
+ _ => return Err(DecodeError::InvalidValue),
+ };
+
+ let our_claim_txn_waiting_first_conf_len: u64 = Readable::read(reader)?;
+ let mut our_claim_txn_waiting_first_conf = HashMap::with_capacity(cmp::min(our_claim_txn_waiting_first_conf_len as usize, MAX_ALLOC_SIZE / 128));
+ for _ in 0..our_claim_txn_waiting_first_conf_len {
+ let outpoint = Readable::read(reader)?;
+ let height_target = Readable::read(reader)?;
+ let tx_material = match <u8 as Readable<R>>::read(reader)? {
+ 0 => {
+ let script = Readable::read(reader)?;
+ let pubkey = Readable::read(reader)?;
+ let key = Readable::read(reader)?;
+ let is_htlc = match <u8 as Readable<R>>::read(reader)? {
+ 0 => true,
+ 1 => false,
+ _ => return Err(DecodeError::InvalidValue),
+ };
+ let amount = Readable::read(reader)?;
+ TxMaterial::Revoked {
+ script,
+ pubkey,
+ key,
+ is_htlc,
+ amount
+ }
+ },
+ 1 => {
+ let script = Readable::read(reader)?;
+ let key = Readable::read(reader)?;
+ let preimage = Readable::read(reader)?;
+ let amount = Readable::read(reader)?;
+ TxMaterial::RemoteHTLC {
+ script,
+ key,
+ preimage,
+ amount
+ }
+ },
+ 2 => {
+ let script = Readable::read(reader)?;
+ let their_sig = Readable::read(reader)?;
+ let our_sig = Readable::read(reader)?;
+ let preimage = Readable::read(reader)?;
+ let amount = Readable::read(reader)?;
+ TxMaterial::LocalHTLC {
+ script,
+ sigs: (their_sig, our_sig),
+ preimage,
+ amount
+ }
+ }
+ _ => return Err(DecodeError::InvalidValue),
+ };
+ let last_fee = Readable::read(reader)?;
+ let timelock_expiration = Readable::read(reader)?;
+ let height = Readable::read(reader)?;
+ our_claim_txn_waiting_first_conf.insert(outpoint, (height_target, tx_material, last_fee, timelock_expiration, height));
+ }
+
+ let waiting_threshold_conf_len: u64 = Readable::read(reader)?;
+ let mut onchain_events_waiting_threshold_conf = HashMap::with_capacity(cmp::min(waiting_threshold_conf_len as usize, MAX_ALLOC_SIZE / 128));
+ for _ in 0..waiting_threshold_conf_len {
+ let height_target = Readable::read(reader)?;
+ let events_len: u64 = Readable::read(reader)?;
+ let mut events = Vec::with_capacity(cmp::min(events_len as usize, MAX_ALLOC_SIZE / 128));
+ for _ in 0..events_len {
+ let ev = match <u8 as Readable<R>>::read(reader)? {
+ 0 => {
+ let outpoint = Readable::read(reader)?;
+ OnchainEvent::Claim {
+ outpoint
+ }
+ },
+ 1 => {
+ let htlc_source = Readable::read(reader)?;
+ let hash = Readable::read(reader)?;
+ OnchainEvent::HTLCUpdate {
+ htlc_update: (htlc_source, hash)
+ }
+ },
+ _ => return Err(DecodeError::InvalidValue),
+ };
+ events.push(ev);
+ }
+ onchain_events_waiting_threshold_conf.insert(height_target, events);
+ }
+
+ Ok((last_block_hash.clone(), ChannelMonitor {
+ commitment_transaction_number_obscure_factor,
+
+ key_storage,
+ their_htlc_base_key,
+ their_delayed_payment_base_key,
+ their_cur_revocation_points,
+
+ our_to_self_delay,
+ their_to_self_delay,
+
+ old_secrets,
+ remote_claimable_outpoints,
+ remote_commitment_txn_on_chain,
+ remote_hash_commitment_number,
+
+ prev_local_signed_commitment_tx,
+ current_local_signed_commitment_tx,
+ current_remote_commitment_number,
+
+ payment_preimages,
+
+ destination_script,
+ to_remote_rescue,
+
+ our_claim_txn_waiting_first_conf,
+
+ onchain_events_waiting_threshold_conf,
+
+ last_block_hash,
+ secp_ctx,
+ logger,
+ }))
+ }
+
+}
+
+#[cfg(test)]
+mod tests {
+ use bitcoin::blockdata::script::{Script, Builder};
+ use bitcoin::blockdata::opcodes;
+ use bitcoin::blockdata::transaction::{Transaction, TxIn, TxOut, SigHashType};
+ use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
+ use bitcoin::util::bip143;
+ use bitcoin_hashes::Hash;
+ use bitcoin_hashes::sha256::Hash as Sha256;
+ use bitcoin_hashes::sha256d::Hash as Sha256dHash;
+ use bitcoin_hashes::hex::FromHex;
+ use hex;
+ use ln::channelmanager::{PaymentPreimage, PaymentHash};
+ use ln::channelmonitor::{ChannelMonitor, InputDescriptors};
+ use ln::chan_utils;
+ use ln::chan_utils::{HTLCOutputInCommitment, TxCreationKeys};
+ use util::test_utils::TestLogger;
+ use secp256k1::key::{SecretKey,PublicKey};
+ use secp256k1::Secp256k1;
+ use rand::{thread_rng,Rng};
+ use std::sync::Arc;
+
+ #[test]
+ fn test_per_commitment_storage() {
+ // Test vectors from BOLT 3:
+ let mut secrets: Vec<[u8; 32]> = Vec::new();
+ let mut monitor: ChannelMonitor;
+ let secp_ctx = Secp256k1::new();
+ let logger = Arc::new(TestLogger::new());
+
+ macro_rules! test_secrets {
+ () => {
+ let mut idx = 281474976710655;
+ for secret in secrets.iter() {
+ assert_eq!(monitor.get_secret(idx).unwrap(), *secret);
+ idx -= 1;
+ }
+ assert_eq!(monitor.get_min_seen_secret(), idx + 1);
+ assert!(monitor.get_secret(idx).is_none());
+ };
+ }
+
+ {
+ // insert_secret correct sequence
+ monitor = ChannelMonitor::new(&SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
+ monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+ monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
+ monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
+ monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+ }
+
+ {
+ // insert_secret #1 incorrect
+ monitor = ChannelMonitor::new(&SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ assert_eq!(monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap_err().0,
+ "Previous secret did not match new one");
+ }
+
+ {
+ // insert_secret #2 incorrect (#1 derived from incorrect)
+ monitor = ChannelMonitor::new(&SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ assert_eq!(monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap_err().0,
+ "Previous secret did not match new one");
+ }
+
+ {
+ // insert_secret #3 incorrect
+ monitor = ChannelMonitor::new(&SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ assert_eq!(monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap_err().0,
+ "Previous secret did not match new one");
+ }
+
+ {
+ // insert_secret #4 incorrect (1,2,3 derived from incorrect)
+ monitor = ChannelMonitor::new(&SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("dddc3a8d14fddf2b68fa8c7fbad2748274937479dd0f8930d5ebb4ab6bd866a3").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c51a18b13e8527e579ec56365482c62f180b7d5760b46e9477dae59e87ed423a").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("ba65d7b0ef55a3ba300d4e87af29868f394f8f138d78a7011669c79b37b936f4").unwrap());
+ monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
+ monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+ monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
+ monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
+ assert_eq!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap_err().0,
+ "Previous secret did not match new one");
+ }
+
+ {
+ // insert_secret #5 incorrect
+ monitor = ChannelMonitor::new(&SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6").unwrap());
+ monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+ assert_eq!(monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap_err().0,
+ "Previous secret did not match new one");
+ }
+
+ {
+ // insert_secret #6 incorrect (5 derived from incorrect)
+ monitor = ChannelMonitor::new(&SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("631373ad5f9ef654bb3dade742d09504c567edd24320d2fcd68e3cc47e2ff6a6").unwrap());
+ monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("b7e76a83668bde38b373970155c868a653304308f9896692f904a23731224bb1").unwrap());
+ monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
+ monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
+ assert_eq!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap_err().0,
+ "Previous secret did not match new one");
+ }
+
+ {
+ // insert_secret #7 incorrect
+ monitor = ChannelMonitor::new(&SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
+ monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+ monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("e7971de736e01da8ed58b94c2fc216cb1dca9e326f3a96e7194fe8ea8af6c0a3").unwrap());
+ monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("05cde6323d949933f7f7b78776bcc1ea6d9b31447732e3802e1f7ac44b650e17").unwrap());
+ assert_eq!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap_err().0,
+ "Previous secret did not match new one");
+ }
+
+ {
+ // insert_secret #8 incorrect
+ monitor = ChannelMonitor::new(&SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
+ secrets.clear();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+ monitor.provide_secret(281474976710655, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ monitor.provide_secret(281474976710654, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+ monitor.provide_secret(281474976710653, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ monitor.provide_secret(281474976710652, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("c65716add7aa98ba7acb236352d665cab17345fe45b55fb879ff80e6bd0c41dd").unwrap());
+ monitor.provide_secret(281474976710651, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("969660042a28f32d9be17344e09374b379962d03db1574df5a8a5a47e19ce3f2").unwrap());
+ monitor.provide_secret(281474976710650, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a5a64476122ca0925fb344bdc1854c1c0a59fc614298e50a33e331980a220f32").unwrap());
+ monitor.provide_secret(281474976710649, secrets.last().unwrap().clone()).unwrap();
+ test_secrets!();
+
+ secrets.push([0; 32]);
+ secrets.last_mut().unwrap()[0..32].clone_from_slice(&hex::decode("a7efbc61aac46d34f77778bac22c8a20c6a46ca460addc49009bda875ec88fa4").unwrap());
+ assert_eq!(monitor.provide_secret(281474976710648, secrets.last().unwrap().clone()).unwrap_err().0,
+ "Previous secret did not match new one");
+ }
+ }
+
+ #[test]
+ fn test_prune_preimages() {
+ let secp_ctx = Secp256k1::new();
+ let logger = Arc::new(TestLogger::new());
+
+ let dummy_key = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ macro_rules! dummy_keys {
+ () => {
+ {
+ TxCreationKeys {
+ per_commitment_point: dummy_key.clone(),
+ revocation_key: dummy_key.clone(),
+ a_htlc_key: dummy_key.clone(),
+ b_htlc_key: dummy_key.clone(),
+ a_delayed_payment_key: dummy_key.clone(),
+ b_payment_key: dummy_key.clone(),
+ }
+ }
+ }
+ }
+ let dummy_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
+
+ let mut preimages = Vec::new();
+ {
+ let mut rng = thread_rng();
+ for _ in 0..20 {
+ let mut preimage = PaymentPreimage([0; 32]);
+ rng.fill_bytes(&mut preimage.0[..]);
+ let hash = PaymentHash(Sha256::hash(&preimage.0[..]).into_inner());
+ preimages.push((preimage, hash));
+ }
+ }
+
+ macro_rules! preimages_slice_to_htlc_outputs {
+ ($preimages_slice: expr) => {
+ {
+ let mut res = Vec::new();
+ for (idx, preimage) in $preimages_slice.iter().enumerate() {
+ res.push((HTLCOutputInCommitment {
+ offered: true,
+ amount_msat: 0,
+ cltv_expiry: 0,
+ payment_hash: preimage.1.clone(),
+ transaction_output_index: Some(idx as u32),
+ }, None));
+ }
+ res
+ }
+ }
+ }
+ macro_rules! preimages_to_local_htlcs {
+ ($preimages_slice: expr) => {
+ {
+ let mut inp = preimages_slice_to_htlc_outputs!($preimages_slice);
+ let res: Vec<_> = inp.drain(..).map(|e| { (e.0, None, e.1) }).collect();
+ res
+ }
+ }
+ }
+
+ macro_rules! test_preimages_exist {
+ ($preimages_slice: expr, $monitor: expr) => {
+ for preimage in $preimages_slice {
+ assert!($monitor.payment_preimages.contains_key(&preimage.1));
+ }
+ }
+ }
+
+ // Prune with one old state and a local commitment tx holding a few overlaps with the
+ // old state.
+ let mut monitor = ChannelMonitor::new(&SecretKey::from_slice(&[42; 32]).unwrap(), &SecretKey::from_slice(&[43; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &SecretKey::from_slice(&[44; 32]).unwrap(), &PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[45; 32]).unwrap()), 0, Script::new(), logger.clone());
+ monitor.set_their_to_self_delay(10);
+
+ monitor.provide_latest_local_commitment_tx_info(dummy_tx.clone(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..10]));
+ monitor.provide_latest_remote_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[5..15]), 281474976710655, dummy_key);
+ monitor.provide_latest_remote_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[15..20]), 281474976710654, dummy_key);
+ monitor.provide_latest_remote_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[17..20]), 281474976710653, dummy_key);
+ monitor.provide_latest_remote_commitment_tx_info(&dummy_tx, preimages_slice_to_htlc_outputs!(preimages[18..20]), 281474976710652, dummy_key);
+ for &(ref preimage, ref hash) in preimages.iter() {
+ monitor.provide_payment_preimage(hash, preimage);
+ }
+
+ // Now provide a secret, pruning preimages 10-15
+ let mut secret = [0; 32];
+ secret[0..32].clone_from_slice(&hex::decode("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap());
+ monitor.provide_secret(281474976710655, secret.clone()).unwrap();
+ assert_eq!(monitor.payment_preimages.len(), 15);
+ test_preimages_exist!(&preimages[0..10], monitor);
+ test_preimages_exist!(&preimages[15..20], monitor);
+
+ // Now provide a further secret, pruning preimages 15-17
+ secret[0..32].clone_from_slice(&hex::decode("c7518c8ae4660ed02894df8976fa1a3659c1a8b4b5bec0c4b872abeba4cb8964").unwrap());
+ monitor.provide_secret(281474976710654, secret.clone()).unwrap();
+ assert_eq!(monitor.payment_preimages.len(), 13);
+ test_preimages_exist!(&preimages[0..10], monitor);
+ test_preimages_exist!(&preimages[17..20], monitor);
+
+ // Now update local commitment tx info, pruning only preimage 17 as we still care about
+ // the previous commitment tx's preimages too
+ monitor.provide_latest_local_commitment_tx_info(dummy_tx.clone(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..5]));
+ secret[0..32].clone_from_slice(&hex::decode("2273e227a5b7449b6e70f1fb4652864038b1cbf9cd7c043a7d6456b7fc275ad8").unwrap());
+ monitor.provide_secret(281474976710653, secret.clone()).unwrap();
+ assert_eq!(monitor.payment_preimages.len(), 12);
+ test_preimages_exist!(&preimages[0..10], monitor);
+ test_preimages_exist!(&preimages[18..20], monitor);
+
+ // But if we do it again, we'll prune 5-10 (and 18-20, as that remote commitment is now
+ // revoked too)
+ monitor.provide_latest_local_commitment_tx_info(dummy_tx.clone(), dummy_keys!(), 0, preimages_to_local_htlcs!(preimages[0..3]));
+ secret[0..32].clone_from_slice(&hex::decode("27cddaa5624534cb6cb9d7da077cf2b22ab21e9b506fd4998a51d54502e99116").unwrap());
+ monitor.provide_secret(281474976710652, secret.clone()).unwrap();
+ assert_eq!(monitor.payment_preimages.len(), 5);
+ test_preimages_exist!(&preimages[0..5], monitor);
+ }
+
+ #[test]
+ fn test_claim_txn_weight_computation() {
+ // We test claim tx weight against the expected (maximum) weight rather than the actual
+ // weight, to avoid variance from signature lengths and timelock delays.
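+ // A DER-encoded ECDSA signature is at most 72 bytes; adding the 1-byte sighash flag gives
+ // the 73-byte max_length_sig figure used in the assertions below.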
+
+ let secp_ctx = Secp256k1::new();
+ let privkey = SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap();
+ let pubkey = PublicKey::from_secret_key(&secp_ctx, &privkey);
+ let mut sum_actual_sigs = 0;
+
+ macro_rules! sign_input {
+ ($sighash_parts: expr, $input: expr, $idx: expr, $amount: expr, $input_type: expr, $sum_actual_sigs: expr) => {
+ let htlc = HTLCOutputInCommitment {
+ offered: *$input_type == InputDescriptors::RevokedOfferedHTLC || *$input_type == InputDescriptors::OfferedHTLC,
+ amount_msat: 0,
+ cltv_expiry: 2 << 16,
+ payment_hash: PaymentHash([1; 32]),
+ transaction_output_index: Some($idx),
+ };
+ let redeem_script = if *$input_type == InputDescriptors::RevokedOutput { chan_utils::get_revokeable_redeemscript(&pubkey, 256, &pubkey) } else { chan_utils::get_htlc_redeemscript_with_explicit_keys(&htlc, &pubkey, &pubkey, &pubkey) };
+ let sighash = hash_to_message!(&$sighash_parts.sighash_all(&$input, &redeem_script, $amount)[..]);
+ let sig = secp_ctx.sign(&sighash, &privkey);
+ $input.witness.push(sig.serialize_der().to_vec());
+ $input.witness[0].push(SigHashType::All as u8);
+ sum_actual_sigs += $input.witness[0].len();
+ if *$input_type == InputDescriptors::RevokedOutput {
+ $input.witness.push(vec!(1));
+ } else if *$input_type == InputDescriptors::RevokedOfferedHTLC || *$input_type == InputDescriptors::RevokedReceivedHTLC {
+ $input.witness.push(pubkey.clone().serialize().to_vec());
+ } else if *$input_type == InputDescriptors::ReceivedHTLC {
+ $input.witness.push(vec![0]);
+ } else {
+ $input.witness.push(PaymentPreimage([1; 32]).0.to_vec());
+ }
+ $input.witness.push(redeem_script.into_bytes());
+ println!("witness[0] {}", $input.witness[0].len());
+ println!("witness[1] {}", $input.witness[1].len());
+ println!("witness[2] {}", $input.witness[2].len());
+ }
+ }
+
+ let script_pubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script();
+ let txid = Sha256dHash::from_hex("56944c5d3f98413ef45cf54545538103cc9f298e0575820ad3591376e2e0f65d").unwrap();
+
+ // Justice tx with 1 to_local output, 2 revoked offered HTLCs and 1 revoked received HTLC
+ let mut claim_tx = Transaction { version: 0, lock_time: 0, input: Vec::new(), output: Vec::new() };
+ for i in 0..4 {
+ claim_tx.input.push(TxIn {
+ previous_output: BitcoinOutPoint {
+ txid,
+ vout: i,
+ },
+ script_sig: Script::new(),
+ sequence: 0xfffffffd,
+ witness: Vec::new(),
+ });
+ }
+ claim_tx.output.push(TxOut {
+ script_pubkey: script_pubkey.clone(),
+ value: 0,
+ });
+ let base_weight = claim_tx.get_weight();
+ let sighash_parts = bip143::SighashComponents::new(&claim_tx);
+ let inputs_des = vec![InputDescriptors::RevokedOutput, InputDescriptors::RevokedOfferedHTLC, InputDescriptors::RevokedOfferedHTLC, InputDescriptors::RevokedReceivedHTLC];
+ for (idx, inp) in claim_tx.input.iter_mut().zip(inputs_des.iter()).enumerate() {
+ sign_input!(sighash_parts, inp.0, idx as u32, 0, inp.1, sum_actual_sigs);
+ }
+ assert_eq!(base_weight + ChannelMonitor::get_witnesses_weight(&inputs_des[..]), claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_des.len() - sum_actual_sigs));
+
+ // Claim tx with 1 offered HTLC and 3 received HTLCs
+ claim_tx.input.clear();
+ sum_actual_sigs = 0;
+ for i in 0..4 {
+ claim_tx.input.push(TxIn {
+ previous_output: BitcoinOutPoint {
+ txid,
+ vout: i,
+ },
+ script_sig: Script::new(),
+ sequence: 0xfffffffd,
+ witness: Vec::new(),
+ });
+ }
+ let base_weight = claim_tx.get_weight();
+ let sighash_parts = bip143::SighashComponents::new(&claim_tx);
+ let inputs_des = vec![InputDescriptors::OfferedHTLC, InputDescriptors::ReceivedHTLC, InputDescriptors::ReceivedHTLC, InputDescriptors::ReceivedHTLC];
+ for (idx, inp) in claim_tx.input.iter_mut().zip(inputs_des.iter()).enumerate() {
+ sign_input!(sighash_parts, inp.0, idx as u32, 0, inp.1, sum_actual_sigs);
+ }
+ assert_eq!(base_weight + ChannelMonitor::get_witnesses_weight(&inputs_des[..]), claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_des.len() - sum_actual_sigs));
+
+ // Justice tx with 1 revoked HTLC-Success tx output
+ claim_tx.input.clear();
+ sum_actual_sigs = 0;
+ claim_tx.input.push(TxIn {
+ previous_output: BitcoinOutPoint {
+ txid,
+ vout: 0,
+ },
+ script_sig: Script::new(),
+ sequence: 0xfffffffd,
+ witness: Vec::new(),
+ });
+ let base_weight = claim_tx.get_weight();
+ let sighash_parts = bip143::SighashComponents::new(&claim_tx);
+ let inputs_des = vec![InputDescriptors::RevokedOutput];
+ for (idx, inp) in claim_tx.input.iter_mut().zip(inputs_des.iter()).enumerate() {
+ sign_input!(sighash_parts, inp.0, idx as u32, 0, inp.1, sum_actual_sigs);
+ }
+ assert_eq!(base_weight + ChannelMonitor::get_witnesses_weight(&inputs_des[..]), claim_tx.get_weight() + /* max_length_sig */ (73 * inputs_des.len() - sum_actual_sigs));
+ }
+
+ // Further testing is done in the ChannelManager integration tests.
+}
--- /dev/null
+//! A bunch of useful utilities for building networks of nodes and exchanging messages between
+//! nodes for functional tests.
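+//!
+//! A minimal usage sketch (the `create_network` constructor named below is illustrative of
+//! however the `Node`s are built; the channel helpers are the ones defined in this module):
+//!
+//! ```ignore
+//! let nodes = create_network(2, &[None, None]);
+//! // Open, fund, confirm and announce a channel from nodes[0] to nodes[1]:
+//! let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+//! // ... route payments, deliver messages, assert on events ...
+//! // Cooperatively close, with nodes[1] (the inbound side) initiating the shutdown:
+//! close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+//! ```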
+
+use chain::chaininterface;
+use chain::transaction::OutPoint;
+use chain::keysinterface::KeysInterface;
+use ln::channelmanager::{ChannelManager,RAACommitmentOrder, PaymentPreimage, PaymentHash};
+use ln::router::{Route, Router};
+use ln::msgs;
+use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler, LocalFeatures};
+use util::test_utils;
+use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::errors::APIError;
+use util::logger::Logger;
+use util::config::UserConfig;
+
+use bitcoin::util::hash::BitcoinHash;
+use bitcoin::blockdata::block::BlockHeader;
+use bitcoin::blockdata::transaction::{Transaction, TxOut};
+use bitcoin::network::constants::Network;
+
+use bitcoin_hashes::sha256::Hash as Sha256;
+use bitcoin_hashes::sha256d::Hash as Sha256d;
+use bitcoin_hashes::Hash;
+
+use secp256k1::Secp256k1;
+use secp256k1::key::PublicKey;
+
+use rand::{thread_rng,Rng};
+
+use std::cell::RefCell;
+use std::rc::Rc;
+use std::sync::{Arc, Mutex};
+use std::mem;
+
+pub const CHAN_CONFIRM_DEPTH: u32 = 100;
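+// Connects a block containing `tx` at height 1, then CHAN_CONFIRM_DEPTH - 2 empty blocks on
+// top, giving the funding transaction enough confirmations for the channel to be usable.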
+pub fn confirm_transaction(chain: &chaininterface::ChainWatchInterfaceUtil, tx: &Transaction, chan_id: u32) {
+ assert!(chain.does_match_tx(tx));
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ chain.block_connected_checked(&header, 1, &[tx; 1], &[chan_id; 1]);
+ for i in 2..CHAN_CONFIRM_DEPTH {
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ chain.block_connected_checked(&header, i, &[tx; 0], &[0; 0]);
+ }
+}
+
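+// Connects `depth` empty blocks at heights `height + 1` through `height + depth`, building on
+// `prev_blockhash` when `parent` is set (and on an all-zero parent hash otherwise), and
+// returns the new tip's hash.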
+pub fn connect_blocks(chain: &chaininterface::ChainWatchInterfaceUtil, depth: u32, height: u32, parent: bool, prev_blockhash: Sha256d) -> Sha256d {
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: if parent { prev_blockhash } else { Default::default() }, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ chain.block_connected_checked(&header, height + 1, &Vec::new(), &Vec::new());
+ for i in 2..depth + 1 {
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ chain.block_connected_checked(&header, height + i, &Vec::new(), &Vec::new());
+ }
+ header.bitcoin_hash()
+}
+
+pub struct Node {
+ pub chain_monitor: Arc<chaininterface::ChainWatchInterfaceUtil>,
+ pub tx_broadcaster: Arc<test_utils::TestBroadcaster>,
+ pub chan_monitor: Arc<test_utils::TestChannelMonitor>,
+ pub keys_manager: Arc<test_utils::TestKeysInterface>,
+ pub node: Arc<ChannelManager>,
+ pub router: Router,
+ pub node_seed: [u8; 32],
+ pub network_payment_count: Rc<RefCell<u8>>,
+ pub network_chan_count: Rc<RefCell<u32>>,
+}
+impl Drop for Node {
+ fn drop(&mut self) {
+ if !::std::thread::panicking() {
+ // Check that we processed all pending events
+ assert!(self.node.get_and_clear_pending_msg_events().is_empty());
+ assert!(self.node.get_and_clear_pending_events().is_empty());
+ assert!(self.chan_monitor.added_monitors.lock().unwrap().is_empty());
+ }
+ }
+}
+
+pub fn create_chan_between_nodes(node_a: &Node, node_b: &Node, a_flags: LocalFeatures, b_flags: LocalFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+ create_chan_between_nodes_with_value(node_a, node_b, 100000, 10001, a_flags, b_flags)
+}
+
+pub fn create_chan_between_nodes_with_value(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+ let (funding_locked, channel_id, tx) = create_chan_between_nodes_with_value_a(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
+ let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(node_a, node_b, &funding_locked);
+ (announcement, as_update, bs_update, channel_id, tx)
+}
+
+macro_rules! get_revoke_commit_msgs {
+ ($node: expr, $node_id: expr) => {
+ {
+ let events = $node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ (match events[0] {
+ MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ assert_eq!(*node_id, $node_id);
+ (*msg).clone()
+ },
+ _ => panic!("Unexpected event"),
+ }, match events[1] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+ assert_eq!(*node_id, $node_id);
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ updates.commitment_signed.clone()
+ },
+ _ => panic!("Unexpected event"),
+ })
+ }
+ }
+}
+
+macro_rules! get_event_msg {
+ ($node: expr, $event_type: path, $node_id: expr) => {
+ {
+ let events = $node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ $event_type { ref node_id, ref msg } => {
+ assert_eq!(*node_id, $node_id);
+ (*msg).clone()
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+ }
+}
+
+macro_rules! get_htlc_update_msgs {
+ ($node: expr, $node_id: expr) => {
+ {
+ let events = $node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+ assert_eq!(*node_id, $node_id);
+ (*updates).clone()
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+ }
+}
+
+macro_rules! get_feerate {
+ ($node: expr, $channel_id: expr) => {
+ {
+ let chan_lock = $node.node.channel_state.lock().unwrap();
+ let chan = chan_lock.by_id.get(&$channel_id).unwrap();
+ chan.get_feerate()
+ }
+ }
+}
+
+pub fn create_funding_transaction(node: &Node, expected_chan_value: u64, expected_user_chan_id: u64) -> ([u8; 32], Transaction, OutPoint) {
+ let chan_id = *node.network_chan_count.borrow();
+
+ let events = node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::FundingGenerationReady { ref temporary_channel_id, ref channel_value_satoshis, ref output_script, user_channel_id } => {
+ assert_eq!(*channel_value_satoshis, expected_chan_value);
+ assert_eq!(user_channel_id, expected_user_chan_id);
+
+ let tx = Transaction { version: chan_id as u32, lock_time: 0, input: Vec::new(), output: vec![TxOut {
+ value: *channel_value_satoshis, script_pubkey: output_script.clone(),
+ }]};
+ let funding_outpoint = OutPoint::new(tx.txid(), 0);
+ (*temporary_channel_id, tx, funding_outpoint)
+ },
+ _ => panic!("Unexpected event"),
+ }
+}
+
+pub fn create_chan_between_nodes_with_value_init(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> Transaction {
+ node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42).unwrap();
+ node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id())).unwrap();
+ node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id())).unwrap();
+
+ let (temporary_channel_id, tx, funding_output) = create_funding_transaction(node_a, channel_value, 42);
+
+ {
+ node_a.node.funding_transaction_generated(&temporary_channel_id, funding_output);
+ let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ assert_eq!(added_monitors[0].0, funding_output);
+ added_monitors.clear();
+ }
+
+ node_b.node.handle_funding_created(&node_a.node.get_our_node_id(), &get_event_msg!(node_a, MessageSendEvent::SendFundingCreated, node_b.node.get_our_node_id())).unwrap();
+ {
+ let mut added_monitors = node_b.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ assert_eq!(added_monitors[0].0, funding_output);
+ added_monitors.clear();
+ }
+
+ node_a.node.handle_funding_signed(&node_b.node.get_our_node_id(), &get_event_msg!(node_b, MessageSendEvent::SendFundingSigned, node_a.node.get_our_node_id())).unwrap();
+ {
+ let mut added_monitors = node_a.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 1);
+ assert_eq!(added_monitors[0].0, funding_output);
+ added_monitors.clear();
+ }
+
+ let events_4 = node_a.node.get_and_clear_pending_events();
+ assert_eq!(events_4.len(), 1);
+ match events_4[0] {
+ Event::FundingBroadcastSafe { ref funding_txo, user_channel_id } => {
+ assert_eq!(user_channel_id, 42);
+ assert_eq!(*funding_txo, funding_output);
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ tx
+}
+
+pub fn create_chan_between_nodes_with_value_confirm_first(node_recv: &Node, node_conf: &Node, tx: &Transaction) {
+ confirm_transaction(&node_conf.chain_monitor, &tx, tx.version);
+ node_recv.node.handle_funding_locked(&node_conf.node.get_our_node_id(), &get_event_msg!(node_conf, MessageSendEvent::SendFundingLocked, node_recv.node.get_our_node_id())).unwrap();
+}
+
+pub fn create_chan_between_nodes_with_value_confirm_second(node_recv: &Node, node_conf: &Node) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
+ let channel_id;
+ let events_6 = node_conf.node.get_and_clear_pending_msg_events();
+ assert_eq!(events_6.len(), 2);
+ ((match events_6[0] {
+ MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
+ channel_id = msg.channel_id.clone();
+ assert_eq!(*node_id, node_recv.node.get_our_node_id());
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),
+ }, match events_6[1] {
+ MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
+ assert_eq!(*node_id, node_recv.node.get_our_node_id());
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),
+ }), channel_id)
+}
+
+pub fn create_chan_between_nodes_with_value_confirm(node_a: &Node, node_b: &Node, tx: &Transaction) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32]) {
+ create_chan_between_nodes_with_value_confirm_first(node_a, node_b, tx);
+ confirm_transaction(&node_a.chain_monitor, &tx, tx.version);
+ create_chan_between_nodes_with_value_confirm_second(node_b, node_a)
+}
+
+pub fn create_chan_between_nodes_with_value_a(node_a: &Node, node_b: &Node, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> ((msgs::FundingLocked, msgs::AnnouncementSignatures), [u8; 32], Transaction) {
+ let tx = create_chan_between_nodes_with_value_init(node_a, node_b, channel_value, push_msat, a_flags, b_flags);
+ let (msgs, chan_id) = create_chan_between_nodes_with_value_confirm(node_a, node_b, &tx);
+ (msgs, chan_id, tx)
+}
+
+pub fn create_chan_between_nodes_with_value_b(node_a: &Node, node_b: &Node, as_funding_msgs: &(msgs::FundingLocked, msgs::AnnouncementSignatures)) -> (msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate) {
+ node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &as_funding_msgs.0).unwrap();
+ let bs_announcement_sigs = get_event_msg!(node_b, MessageSendEvent::SendAnnouncementSignatures, node_a.node.get_our_node_id());
+ node_b.node.handle_announcement_signatures(&node_a.node.get_our_node_id(), &as_funding_msgs.1).unwrap();
+
+ let events_7 = node_b.node.get_and_clear_pending_msg_events();
+ assert_eq!(events_7.len(), 1);
+ let (announcement, bs_update) = match events_7[0] {
+ MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
+ (msg, update_msg)
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ node_a.node.handle_announcement_signatures(&node_b.node.get_our_node_id(), &bs_announcement_sigs).unwrap();
+ let events_8 = node_a.node.get_and_clear_pending_msg_events();
+ assert_eq!(events_8.len(), 1);
+ let as_update = match events_8[0] {
+ MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
+ assert!(*announcement == *msg);
+ assert_eq!(update_msg.contents.short_channel_id, announcement.contents.short_channel_id);
+ assert_eq!(update_msg.contents.short_channel_id, bs_update.contents.short_channel_id);
+ update_msg
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ *node_a.network_chan_count.borrow_mut() += 1;
+
+ ((*announcement).clone(), (*as_update).clone(), (*bs_update).clone())
+}
+
+pub fn create_announced_chan_between_nodes(nodes: &Vec<Node>, a: usize, b: usize, a_flags: LocalFeatures, b_flags: LocalFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+ create_announced_chan_between_nodes_with_value(nodes, a, b, 100000, 10001, a_flags, b_flags)
+}
+
+pub fn create_announced_chan_between_nodes_with_value(nodes: &Vec<Node>, a: usize, b: usize, channel_value: u64, push_msat: u64, a_flags: LocalFeatures, b_flags: LocalFeatures) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction) {
+ let chan_announcement = create_chan_between_nodes_with_value(&nodes[a], &nodes[b], channel_value, push_msat, a_flags, b_flags);
+ for node in nodes {
+ assert!(node.router.handle_channel_announcement(&chan_announcement.0).unwrap());
+ node.router.handle_channel_update(&chan_announcement.1).unwrap();
+ node.router.handle_channel_update(&chan_announcement.2).unwrap();
+ }
+ (chan_announcement.1, chan_announcement.2, chan_announcement.3, chan_announcement.4)
+}
+
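+// Asserts that $tx validly spends (only) outputs of $spends_tx, including script/witness
+// verification via Transaction::verify.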
+macro_rules! check_spends {
+ ($tx: expr, $spends_tx: expr) => {
+ {
+ $tx.verify(|out_point| {
+ if out_point.txid == $spends_tx.txid() {
+ $spends_tx.output.get(out_point.vout as usize).cloned()
+ } else {
+ None
+ }
+ }).unwrap();
+ }
+ }
+}
+
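+// Collects a node's post-closing broadcast events: a BroadcastChannelUpdate with the disable
+// bit set (flags & 2, per BOLT 7), plus an optional closing_signed addressed to $dest_pubkey.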
+macro_rules! get_closing_signed_broadcast {
+ ($node: expr, $dest_pubkey: expr) => {
+ {
+ let events = $node.get_and_clear_pending_msg_events();
+ assert!(events.len() == 1 || events.len() == 2);
+ (match events[events.len() - 1] {
+ MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+ assert_eq!(msg.contents.flags & 2, 2);
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),
+ }, if events.len() == 2 {
+ match events[0] {
+ MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
+ assert_eq!(*node_id, $dest_pubkey);
+ Some(msg.clone())
+ },
+ _ => panic!("Unexpected event"),
+ }
+ } else { None })
+ }
+ }
+}
+
+macro_rules! check_closed_broadcast {
+ ($node: expr) => {{
+ let events = $node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+ assert_eq!(msg.contents.flags & 2, 2);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }}
+}
+
+pub fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
+ let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
+ let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
+ let (tx_a, tx_b);
+
+ node_a.close_channel(channel_id).unwrap();
+ node_b.handle_shutdown(&node_a.get_our_node_id(), &get_event_msg!(struct_a, MessageSendEvent::SendShutdown, node_b.get_our_node_id())).unwrap();
+
+ let events_1 = node_b.get_and_clear_pending_msg_events();
+ assert!(events_1.len() >= 1);
+ let shutdown_b = match events_1[0] {
+ MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
+ assert_eq!(node_id, &node_a.get_our_node_id());
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ let closing_signed_b = if !close_inbound_first {
+ assert_eq!(events_1.len(), 1);
+ None
+ } else {
+ Some(match events_1[1] {
+ MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
+ assert_eq!(node_id, &node_a.get_our_node_id());
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),
+ })
+ };
+
+ node_a.handle_shutdown(&node_b.get_our_node_id(), &shutdown_b).unwrap();
+ let (as_update, bs_update) = if close_inbound_first {
+ assert!(node_a.get_and_clear_pending_msg_events().is_empty());
+ node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap();
+ assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
+ tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
+ let (as_update, closing_signed_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
+
+ node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a.unwrap()).unwrap();
+ let (bs_update, none_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
+ assert!(none_b.is_none());
+ assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
+ tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
+ (as_update, bs_update)
+ } else {
+ let closing_signed_a = get_event_msg!(struct_a, MessageSendEvent::SendClosingSigned, node_b.get_our_node_id());
+
+ node_b.handle_closing_signed(&node_a.get_our_node_id(), &closing_signed_a).unwrap();
+ assert_eq!(broadcaster_b.txn_broadcasted.lock().unwrap().len(), 1);
+ tx_b = broadcaster_b.txn_broadcasted.lock().unwrap().remove(0);
+ let (bs_update, closing_signed_b) = get_closing_signed_broadcast!(node_b, node_a.get_our_node_id());
+
+ node_a.handle_closing_signed(&node_b.get_our_node_id(), &closing_signed_b.unwrap()).unwrap();
+ let (as_update, none_a) = get_closing_signed_broadcast!(node_a, node_b.get_our_node_id());
+ assert!(none_a.is_none());
+ assert_eq!(broadcaster_a.txn_broadcasted.lock().unwrap().len(), 1);
+ tx_a = broadcaster_a.txn_broadcasted.lock().unwrap().remove(0);
+ (as_update, bs_update)
+ };
+ assert_eq!(tx_a, tx_b);
+ check_spends!(tx_a, funding_tx);
+
+ (as_update, bs_update, tx_a)
+}
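+
+// Illustrative usage (an assumption, not part of the original patch): given the
+// (ChannelUpdate, ChannelUpdate, [u8; 32], Transaction) tuple the channel-creation helpers
+// return, a test might drive a cooperative close with
+//   let (as_update, bs_update, closing_tx) = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+// where `true` makes the inbound node send its closing_signed first.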
+
+pub struct SendEvent {
+ pub node_id: PublicKey,
+ pub msgs: Vec<msgs::UpdateAddHTLC>,
+ pub commitment_msg: msgs::CommitmentSigned,
+}
+impl SendEvent {
+ pub fn from_commitment_update(node_id: PublicKey, updates: msgs::CommitmentUpdate) -> SendEvent {
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ SendEvent { node_id: node_id, msgs: updates.update_add_htlcs, commitment_msg: updates.commitment_signed }
+ }
+
+ pub fn from_event(event: MessageSendEvent) -> SendEvent {
+ match event {
+ MessageSendEvent::UpdateHTLCs { node_id, updates } => SendEvent::from_commitment_update(node_id, updates),
+ _ => panic!("Unexpected event type!"),
+ }
+ }
+
+ pub fn from_node(node: &Node) -> SendEvent {
+ let mut events = node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.pop().unwrap())
+ }
+}
+
+macro_rules! check_added_monitors {
+ ($node: expr, $count: expr) => {
+ {
+ let mut added_monitors = $node.chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), $count);
+ added_monitors.clear();
+ }
+ }
+}
+
+macro_rules! commitment_signed_dance {
+ ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */) => {
+ {
+ check_added_monitors!($node_a, 0);
+ assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
+ $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap();
+ check_added_monitors!($node_a, 1);
+ commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, false);
+ }
+ };
+ ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */, true /* return last RAA */) => {
+ {
+ let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!($node_a, $node_b.node.get_our_node_id());
+ check_added_monitors!($node_b, 0);
+ assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
+ $node_b.node.handle_revoke_and_ack(&$node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap();
+ assert!($node_b.node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!($node_b, 1);
+ $node_b.node.handle_commitment_signed(&$node_a.node.get_our_node_id(), &as_commitment_signed).unwrap();
+ let (bs_revoke_and_ack, extra_msg_option) = {
+ let events = $node_b.node.get_and_clear_pending_msg_events();
+ assert!(events.len() <= 2);
+ (match events[0] {
+ MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ assert_eq!(*node_id, $node_a.node.get_our_node_id());
+ (*msg).clone()
+ },
+ _ => panic!("Unexpected event"),
+ }, events.get(1).map(|e| e.clone()))
+ };
+ check_added_monitors!($node_b, 1);
+ if $fail_backwards {
+ assert!($node_a.node.get_and_clear_pending_events().is_empty());
+ assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
+ }
+ (extra_msg_option, bs_revoke_and_ack)
+ }
+ };
+ ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr, true /* skip last step */, false /* return extra message */, true /* return last RAA */) => {
+ {
+ check_added_monitors!($node_a, 0);
+ assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
+ $node_a.node.handle_commitment_signed(&$node_b.node.get_our_node_id(), &$commitment_signed).unwrap();
+ check_added_monitors!($node_a, 1);
+ let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
+ assert!(extra_msg_option.is_none());
+ bs_revoke_and_ack
+ }
+ };
+ ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, true /* return extra message */) => {
+ {
+ let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
+ $node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+ check_added_monitors!($node_a, 1);
+ extra_msg_option
+ }
+ };
+ ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */) => {
+ {
+ assert!(commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true).is_none());
+ }
+ };
+ ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => {
+ {
+ commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
+ if $fail_backwards {
+ expect_pending_htlcs_forwardable!($node_a);
+ check_added_monitors!($node_a, 1);
+
+ let channel_state = $node_a.node.channel_state.lock().unwrap();
+ assert_eq!(channel_state.pending_msg_events.len(), 1);
+ if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] {
+ assert_ne!(*node_id, $node_b.node.get_our_node_id());
+ } else { panic!("Unexpected event"); }
+ } else {
+ assert!($node_a.node.get_and_clear_pending_msg_events().is_empty());
+ }
+ }
+ }
+}
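+
+// Usage sketch (illustrative only, not part of the original patch): the trailing literal
+// tokens select a macro arm. The common full dance after delivering a commitment_signed is
+//   commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+// while the `true /* skip last step */` arms stop early so a test can hold back, inspect,
+// or reorder the final revoke_and_ack/commitment_signed deliveries itself.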
+
+macro_rules! get_payment_preimage_hash {
+ ($node: expr) => {
+ {
+ let payment_preimage = PaymentPreimage([*$node.network_payment_count.borrow(); 32]);
+ *$node.network_payment_count.borrow_mut() += 1;
+ let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
+ (payment_preimage, payment_hash)
+ }
+ }
+}
+
+macro_rules! expect_pending_htlcs_forwardable {
+ ($node: expr) => {{
+ let events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+ $node.node.process_pending_htlc_forwards();
+ }}
+}
+
+macro_rules! expect_payment_received {
+ ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
+ let events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!($expected_payment_hash, *payment_hash);
+ assert_eq!($expected_recv_value, amt);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+}
+
+macro_rules! expect_payment_sent {
+ ($node: expr, $expected_payment_preimage: expr) => {
+ let events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!($expected_payment_preimage, *payment_preimage);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+}
+
+pub fn send_along_route_with_hash(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64, our_payment_hash: PaymentHash) {
+ let mut payment_event = {
+ origin_node.node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(origin_node, 1);
+
+ let mut events = origin_node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+ let mut prev_node = origin_node;
+
+ for (idx, &node) in expected_route.iter().enumerate() {
+ assert_eq!(node.node.get_our_node_id(), payment_event.node_id);
+
+ node.node.handle_update_add_htlc(&prev_node.node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ check_added_monitors!(node, 0);
+ commitment_signed_dance!(node, prev_node, payment_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(node);
+
+ if idx == expected_route.len() - 1 {
+ let events_2 = node.node.get_and_clear_pending_events();
+ assert_eq!(events_2.len(), 1);
+ match events_2[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(our_payment_hash, *payment_hash);
+ assert_eq!(amt, recv_value);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ } else {
+ let mut events_2 = node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events_2.len(), 1);
+ check_added_monitors!(node, 1);
+ payment_event = SendEvent::from_event(events_2.remove(0));
+ assert_eq!(payment_event.msgs.len(), 1);
+ }
+
+ prev_node = node;
+ }
+}
+
+pub fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
+ let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node);
+ send_along_route_with_hash(origin_node, route, expected_route, recv_value, our_payment_hash);
+ (our_payment_preimage, our_payment_hash)
+}
+
+pub fn claim_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_preimage: PaymentPreimage) {
+ assert!(expected_route.last().unwrap().node.claim_funds(our_payment_preimage));
+ check_added_monitors!(expected_route.last().unwrap(), 1);
+
+ let mut next_msgs: Option<(msgs::UpdateFulfillHTLC, msgs::CommitmentSigned)> = None;
+ let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
+ macro_rules! get_next_msgs {
+ ($node: expr) => {
+ {
+ let events = $node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert_eq!(update_fulfill_htlcs.len(), 1);
+ assert!(update_fail_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert!(update_fee.is_none());
+ expected_next_node = node_id.clone();
+ Some((update_fulfill_htlcs[0].clone(), commitment_signed.clone()))
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+ }
+ }
+
+ macro_rules! last_update_fulfill_dance {
+ ($node: expr, $prev_node: expr) => {
+ {
+ $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
+ check_added_monitors!($node, 0);
+ assert!($node.node.get_and_clear_pending_msg_events().is_empty());
+ commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
+ }
+ }
+ }
+ macro_rules! mid_update_fulfill_dance {
+ ($node: expr, $prev_node: expr, $new_msgs: expr) => {
+ {
+ $node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
+ check_added_monitors!($node, 1);
+ let new_next_msgs = if $new_msgs {
+ get_next_msgs!($node)
+ } else {
+ assert!($node.node.get_and_clear_pending_msg_events().is_empty());
+ None
+ };
+ commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, false);
+ next_msgs = new_next_msgs;
+ }
+ }
+ }
+
+ let mut prev_node = expected_route.last().unwrap();
+ for (idx, node) in expected_route.iter().rev().enumerate() {
+ assert_eq!(expected_next_node, node.node.get_our_node_id());
+ let update_next_msgs = !skip_last || idx != expected_route.len() - 1;
+ if next_msgs.is_some() {
+ mid_update_fulfill_dance!(node, prev_node, update_next_msgs);
+ } else if update_next_msgs {
+ next_msgs = get_next_msgs!(node);
+ } else {
+ assert!(node.node.get_and_clear_pending_msg_events().is_empty());
+ }
+ if !skip_last && idx == expected_route.len() - 1 {
+ assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
+ }
+
+ prev_node = node;
+ }
+
+ if !skip_last {
+ last_update_fulfill_dance!(origin_node, expected_route.first().unwrap());
+ expect_payment_sent!(origin_node, our_payment_preimage);
+ }
+}
+
+pub fn claim_payment(origin_node: &Node, expected_route: &[&Node], our_payment_preimage: PaymentPreimage) {
+ claim_payment_along_route(origin_node, expected_route, false, our_payment_preimage);
+}
+
+pub const TEST_FINAL_CLTV: u32 = 32;
+
+pub fn route_payment(origin_node: &Node, expected_route: &[&Node], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
+ let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
+ assert_eq!(route.hops.len(), expected_route.len());
+ for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
+ assert_eq!(hop.pubkey, node.node.get_our_node_id());
+ }
+
+ send_along_route(origin_node, route, expected_route, recv_value)
+}
+
+pub fn route_over_limit(origin_node: &Node, expected_route: &[&Node], recv_value: u64) {
+ let route = origin_node.router.get_route(&expected_route.last().unwrap().node.get_our_node_id(), None, &Vec::new(), recv_value, TEST_FINAL_CLTV).unwrap();
+ assert_eq!(route.hops.len(), expected_route.len());
+ for (node, hop) in expected_route.iter().zip(route.hops.iter()) {
+ assert_eq!(hop.pubkey, node.node.get_our_node_id());
+ }
+
+ let (_, our_payment_hash) = get_payment_preimage_hash!(origin_node);
+
+ let err = origin_node.node.send_payment(route, our_payment_hash).err().unwrap();
+ match err {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept"),
+ _ => panic!("Unknown error variants"),
+ };
+}
+
+pub fn send_payment(origin: &Node, expected_route: &[&Node], recv_value: u64) {
+ let our_payment_preimage = route_payment(&origin, expected_route, recv_value).0;
+ claim_payment(&origin, expected_route, our_payment_preimage);
+}
+
+pub fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: PaymentHash) {
+ assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash));
+ expect_pending_htlcs_forwardable!(expected_route.last().unwrap());
+ check_added_monitors!(expected_route.last().unwrap(), 1);
+
+ let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None;
+ macro_rules! update_fail_dance {
+ ($node: expr, $prev_node: expr, $last_node: expr) => {
+ {
+ $node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
+ commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, !$last_node);
+ if skip_last && $last_node {
+ expect_pending_htlcs_forwardable!($node);
+ }
+ }
+ }
+ }
+
+ let mut expected_next_node = expected_route.last().unwrap().node.get_our_node_id();
+ let mut prev_node = expected_route.last().unwrap();
+ for (idx, node) in expected_route.iter().rev().enumerate() {
+ assert_eq!(expected_next_node, node.node.get_our_node_id());
+ if next_msgs.is_some() {
+ // We may be the "last node" for the purpose of the commitment dance if we're
+ // skipping the last node (implying it is disconnected) and we're the
+ // second-to-last node!
+ update_fail_dance!(node, prev_node, skip_last && idx == expected_route.len() - 1);
+ }
+
+ let events = node.node.get_and_clear_pending_msg_events();
+ if !skip_last || idx != expected_route.len() - 1 {
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert!(update_fulfill_htlcs.is_empty());
+ assert_eq!(update_fail_htlcs.len(), 1);
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert!(update_fee.is_none());
+ expected_next_node = node_id.clone();
+ next_msgs = Some((update_fail_htlcs[0].clone(), commitment_signed.clone()));
+ },
+ _ => panic!("Unexpected event"),
+ }
+ } else {
+ assert!(events.is_empty());
+ }
+ if !skip_last && idx == expected_route.len() - 1 {
+ assert_eq!(expected_next_node, origin_node.node.get_our_node_id());
+ }
+
+ prev_node = node;
+ }
+
+ if !skip_last {
+ update_fail_dance!(origin_node, expected_route.first().unwrap(), true);
+
+ let events = origin_node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
+ assert_eq!(payment_hash, our_payment_hash);
+ assert!(rejected_by_dest);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+}
+
+pub fn fail_payment(origin_node: &Node, expected_route: &[&Node], our_payment_hash: PaymentHash) {
+ fail_payment_along_route(origin_node, expected_route, false, our_payment_hash);
+}
+
+pub fn create_network(node_count: usize, node_config: &[Option<UserConfig>]) -> Vec<Node> {
+ let mut nodes = Vec::new();
+ let mut rng = thread_rng();
+ let secp_ctx = Secp256k1::new();
+
+ let chan_count = Rc::new(RefCell::new(0));
+ let payment_count = Rc::new(RefCell::new(0));
+
+ for i in 0..node_count {
+ let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
+ let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
+ let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
+ let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
+ let mut seed = [0; 32];
+ rng.fill_bytes(&mut seed);
+ let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet, Arc::clone(&logger)));
+ let chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), feeest.clone()));
+ let mut default_config = UserConfig::new();
+ default_config.channel_options.announced_channel = true;
+ default_config.peer_channel_config_limits.force_announced_channel_preference = false;
+ let node = ChannelManager::new(Network::Testnet, feeest.clone(), chan_monitor.clone(), chain_monitor.clone(), tx_broadcaster.clone(), Arc::clone(&logger), keys_manager.clone(), if node_config[i].is_some() { node_config[i].clone().unwrap() } else { default_config }, 0).unwrap();
+ let router = Router::new(PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()), chain_monitor.clone(), Arc::clone(&logger));
+ nodes.push(Node { chain_monitor, tx_broadcaster, chan_monitor, node, router, keys_manager, node_seed: seed,
+ network_payment_count: payment_count.clone(),
+ network_chan_count: chan_count.clone(),
+ });
+ }
+
+ nodes
+}
+
+#[derive(PartialEq)]
+pub enum HTLCType { NONE, TIMEOUT, SUCCESS }
+/// Tests that the given node has broadcast transactions for the given Channel
+///
+/// First checks that the latest local commitment tx has been broadcast, unless an explicit
+/// commitment_tx is provided, which may be used to test that a remote commitment tx was
+/// broadcast and the revoked outputs were claimed.
+///
+/// Next tests that there is (or is not) a transaction that spends the commitment transaction
+/// that appears to be the type of HTLC transaction specified in has_htlc_tx.
+///
+/// All broadcast transactions must be accounted for in one of the above three types or we'll
+/// also fail.
+pub fn test_txn_broadcast(node: &Node, chan: &(msgs::ChannelUpdate, msgs::ChannelUpdate, [u8; 32], Transaction), commitment_tx: Option<Transaction>, has_htlc_tx: HTLCType) -> Vec<Transaction> {
+ let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert!(node_txn.len() >= if commitment_tx.is_some() { 0 } else { 1 } + if has_htlc_tx == HTLCType::NONE { 0 } else { 1 });
+
+ let mut res = Vec::with_capacity(2);
+ node_txn.retain(|tx| {
+ if tx.input.len() == 1 && tx.input[0].previous_output.txid == chan.3.txid() {
+ check_spends!(tx, chan.3.clone());
+ if commitment_tx.is_none() {
+ res.push(tx.clone());
+ }
+ false
+ } else { true }
+ });
+ if let Some(explicit_tx) = commitment_tx {
+ res.push(explicit_tx.clone());
+ }
+
+ assert_eq!(res.len(), 1);
+
+ if has_htlc_tx != HTLCType::NONE {
+ node_txn.retain(|tx| {
+ if tx.input.len() == 1 && tx.input[0].previous_output.txid == res[0].txid() {
+ check_spends!(tx, res[0].clone());
+ if has_htlc_tx == HTLCType::TIMEOUT {
+ assert!(tx.lock_time != 0);
+ } else {
+ assert!(tx.lock_time == 0);
+ }
+ res.push(tx.clone());
+ false
+ } else { true }
+ });
+ assert!(res.len() == 2 || res.len() == 3);
+ if res.len() == 3 {
+ assert_eq!(res[1], res[2]);
+ }
+ }
+
+ assert!(node_txn.is_empty());
+ res
+}
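+
+// Illustrative call (an assumption, not part of the original patch): after an offered HTLC
+// times out on a broadcast local commitment transaction, a test might check the resulting
+// broadcasts with
+//   let node_txn = test_txn_broadcast(&nodes[0], &chan, None, HTLCType::TIMEOUT);
+// and then confirm `node_txn` on-chain to exercise the counterparty's claim logic.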
+
+/// Tests that the given node has broadcast a claim transaction against the provided revoked
+/// HTLC transaction.
+pub fn test_revoked_htlc_claim_txn_broadcast(node: &Node, revoked_tx: Transaction) {
+ let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 1);
+ node_txn.retain(|tx| {
+ if tx.input.len() == 1 && tx.input[0].previous_output.txid == revoked_tx.txid() {
+ check_spends!(tx, revoked_tx.clone());
+ false
+ } else { true }
+ });
+ assert!(node_txn.is_empty());
+}
+
+pub fn check_preimage_claim(node: &Node, prev_txn: &Vec<Transaction>) -> Vec<Transaction> {
+ let mut node_txn = node.tx_broadcaster.txn_broadcasted.lock().unwrap();
+
+ assert!(node_txn.len() >= 1);
+ assert_eq!(node_txn[0].input.len(), 1);
+ let mut found_prev = false;
+
+ for tx in prev_txn {
+ if node_txn[0].input[0].previous_output.txid == tx.txid() {
+ check_spends!(node_txn[0], tx.clone());
+ assert!(node_txn[0].input[0].witness[2].len() > 106); // must spend an htlc output
+ assert_eq!(tx.input.len(), 1); // must spend a commitment tx
+
+ found_prev = true;
+ break;
+ }
+ }
+ assert!(found_prev);
+
+ let mut res = Vec::new();
+ mem::swap(&mut *node_txn, &mut res);
+ res
+}
+
+pub fn get_announce_close_broadcast_events(nodes: &Vec<Node>, a: usize, b: usize) {
+ let events_1 = nodes[a].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_1.len(), 1);
+ let as_update = match events_1[0] {
+ MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ let events_2 = nodes[b].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_2.len(), 1);
+ let bs_update = match events_2[0] {
+ MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+ msg.clone()
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ for node in nodes {
+ node.router.handle_channel_update(&as_update).unwrap();
+ node.router.handle_channel_update(&bs_update).unwrap();
+ }
+}
+
+macro_rules! get_channel_value_stat {
+ ($node: expr, $channel_id: expr) => {{
+ let chan_lock = $node.node.channel_state.lock().unwrap();
+ let chan = chan_lock.by_id.get(&$channel_id).unwrap();
+ chan.get_value_stat()
+ }}
+}
+
+macro_rules! get_chan_reestablish_msgs {
+ ($src_node: expr, $dst_node: expr) => {
+ {
+ let mut res = Vec::with_capacity(1);
+ for msg in $src_node.node.get_and_clear_pending_msg_events() {
+ if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
+ assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+ res.push(msg.clone());
+ } else {
+ panic!("Unexpected event")
+ }
+ }
+ res
+ }
+ }
+}
+
+macro_rules! handle_chan_reestablish_msgs {
+ ($src_node: expr, $dst_node: expr) => {
+ {
+ let msg_events = $src_node.node.get_and_clear_pending_msg_events();
+ let mut idx = 0;
+ let funding_locked = if let Some(&MessageSendEvent::SendFundingLocked { ref node_id, ref msg }) = msg_events.get(0) {
+ idx += 1;
+ assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+ Some(msg.clone())
+ } else {
+ None
+ };
+
+ let mut revoke_and_ack = None;
+ let mut commitment_update = None;
+ let order = if let Some(ev) = msg_events.get(idx) {
+ idx += 1;
+ match ev {
+ &MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+ revoke_and_ack = Some(msg.clone());
+ RAACommitmentOrder::RevokeAndACKFirst
+ },
+ &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+ assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+ commitment_update = Some(updates.clone());
+ RAACommitmentOrder::CommitmentFirst
+ },
+ _ => panic!("Unexpected event"),
+ }
+ } else {
+ RAACommitmentOrder::CommitmentFirst
+ };
+
+ if let Some(ev) = msg_events.get(idx) {
+ match ev {
+ &MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+ assert!(revoke_and_ack.is_none());
+ revoke_and_ack = Some(msg.clone());
+ },
+ &MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+ assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+ assert!(commitment_update.is_none());
+ commitment_update = Some(updates.clone());
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+
+ (funding_locked, revoke_and_ack, commitment_update, order)
+ }
+ }
+}
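+
+// The macro returns (funding_locked, revoke_and_ack, commitment_update, order); a caller
+// sketch (illustrative only, not part of the original patch) asserting a quiescent
+// reconnect would be
+//   let (fl, raa, cu, _order) = handle_chan_reestablish_msgs!(node_b, node_a);
+//   assert!(fl.is_none() && raa.is_none() && cu.is_none());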
+
+/// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
+/// for claims/fails they are separated out.
+pub fn reconnect_nodes(node_a: &Node, node_b: &Node, send_funding_locked: (bool, bool), pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
+ node_a.node.peer_connected(&node_b.node.get_our_node_id());
+ let reestablish_1 = get_chan_reestablish_msgs!(node_a, node_b);
+ node_b.node.peer_connected(&node_a.node.get_our_node_id());
+ let reestablish_2 = get_chan_reestablish_msgs!(node_b, node_a);
+
+ if send_funding_locked.0 {
+ // If node_a expects a funding_locked, it had better not think it has received a
+ // revoke_and_ack from node_b
+ for reestablish in reestablish_1.iter() {
+ assert_eq!(reestablish.next_remote_commitment_number, 0);
+ }
+ }
+ if send_funding_locked.1 {
+ // If node_b expects a funding_locked, it had better not think it has received a
+ // revoke_and_ack from node_a
+ for reestablish in reestablish_2.iter() {
+ assert_eq!(reestablish.next_remote_commitment_number, 0);
+ }
+ }
+ if send_funding_locked.0 || send_funding_locked.1 {
+ // If we expect any funding_locked's, both sides better have set
+ // next_local_commitment_number to 1
+ for reestablish in reestablish_1.iter() {
+ assert_eq!(reestablish.next_local_commitment_number, 1);
+ }
+ for reestablish in reestablish_2.iter() {
+ assert_eq!(reestablish.next_local_commitment_number, 1);
+ }
+ }
+
+ let mut resp_1 = Vec::new();
+ for msg in reestablish_1 {
+ node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap();
+ resp_1.push(handle_chan_reestablish_msgs!(node_b, node_a));
+ }
+ if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
+ check_added_monitors!(node_b, 1);
+ } else {
+ check_added_monitors!(node_b, 0);
+ }
+
+ let mut resp_2 = Vec::new();
+ for msg in reestablish_2 {
+ node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap();
+ resp_2.push(handle_chan_reestablish_msgs!(node_a, node_b));
+ }
+ if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
+ check_added_monitors!(node_a, 1);
+ } else {
+ check_added_monitors!(node_a, 0);
+ }
+
+ // We don't yet support both needing updates, as that would require a different commitment dance:
+ assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
+ (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));
+
+ for chan_msgs in resp_1.drain(..) {
+ if send_funding_locked.0 {
+ node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
+ let announcement_event = node_a.node.get_and_clear_pending_msg_events();
+ if !announcement_event.is_empty() {
+ assert_eq!(announcement_event.len(), 1);
+ if let MessageSendEvent::SendAnnouncementSignatures { .. } = announcement_event[0] {
+ //TODO: Test announcement_sigs re-sending
+ } else { panic!("Unexpected event!"); }
+ }
+ } else {
+ assert!(chan_msgs.0.is_none());
+ }
+ if pending_raa.0 {
+ assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
+ node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap();
+ assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(node_a, 1);
+ } else {
+ assert!(chan_msgs.1.is_none());
+ }
+ if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
+ let commitment_update = chan_msgs.2.unwrap();
+ if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
+ assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
+ } else {
+ assert!(commitment_update.update_add_htlcs.is_empty());
+ }
+ assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
+ assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0);
+ assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
+ for update_add in commitment_update.update_add_htlcs {
+ node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add).unwrap();
+ }
+ for update_fulfill in commitment_update.update_fulfill_htlcs {
+ node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap();
+ }
+ for update_fail in commitment_update.update_fail_htlcs {
+ node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap();
+ }
+
+ if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
+ commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
+ } else {
+ node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
+ check_added_monitors!(node_a, 1);
+ let as_revoke_and_ack = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b.node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap();
+ assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(node_b, 1);
+ }
+ } else {
+ assert!(chan_msgs.2.is_none());
+ }
+ }
+
+ for chan_msgs in resp_2.drain(..) {
+ if send_funding_locked.1 {
+ node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
+ let announcement_event = node_b.node.get_and_clear_pending_msg_events();
+ if !announcement_event.is_empty() {
+ assert_eq!(announcement_event.len(), 1);
+ if let MessageSendEvent::SendAnnouncementSignatures { .. } = announcement_event[0] {
+ //TODO: Test announcement_sigs re-sending
+ } else { panic!("Unexpected event!"); }
+ }
+ } else {
+ assert!(chan_msgs.0.is_none());
+ }
+ if pending_raa.1 {
+ assert!(chan_msgs.3 == RAACommitmentOrder::RevokeAndACKFirst);
+ node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap();
+ assert!(node_b.node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(node_b, 1);
+ } else {
+ assert!(chan_msgs.1.is_none());
+ }
+ if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
+ let commitment_update = chan_msgs.2.unwrap();
+ if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+ assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
+ }
+ assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
+ assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.1);
+ assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
+ for update_add in commitment_update.update_add_htlcs {
+ node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add).unwrap();
+ }
+ for update_fulfill in commitment_update.update_fulfill_htlcs {
+ node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap();
+ }
+ for update_fail in commitment_update.update_fail_htlcs {
+ node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap();
+ }
+
+ if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+ commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
+ } else {
+ node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
+ check_added_monitors!(node_b, 1);
+ let bs_revoke_and_ack = get_event_msg!(node_b, MessageSendEvent::SendRevokeAndACK, node_a.node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+ assert!(node_a.node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(node_a, 1);
+ }
+ } else {
+ assert!(chan_msgs.2.is_none());
+ }
+ }
+}
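+
+// Illustrative usage (an assumption, not part of the original patch): a disconnect and
+// reconnect with nothing in flight on either side reduces to
+//   reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+// with each tuple giving the (node_a, node_b) expectations described above.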
--- /dev/null
+//! Tests which stand up a network of ChannelManagers, creating channels, sending
+//! payments/messages between them, and often checking that the resulting ChannelMonitors
+//! are able to claim outputs on-chain.
+
+use chain::transaction::OutPoint;
+use chain::chaininterface::{ChainListener, ChainWatchInterface, ChainWatchInterfaceUtil};
+use chain::keysinterface::{KeysInterface, SpendableOutputDescriptor, KeysManager};
+use chain::keysinterface;
+use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
+use ln::channelmanager::{ChannelManager,ChannelManagerReadArgs,HTLCForwardInfo,RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT};
+use ln::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ManyChannelMonitor, ANTI_REORG_DELAY};
+use ln::channel::{ACCEPTED_HTLC_SCRIPT_WEIGHT, OFFERED_HTLC_SCRIPT_WEIGHT, Channel, ChannelError};
+use ln::onion_utils;
+use ln::router::{Route, RouteHop};
+use ln::msgs;
+use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, LocalFeatures, ErrorAction};
+use util::test_utils;
+use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::errors::APIError;
+use util::ser::{Writeable, ReadableArgs};
+use util::config::UserConfig;
+use util::logger::Logger;
+
+use bitcoin::util::hash::BitcoinHash;
+use bitcoin_hashes::sha256d::Hash as Sha256dHash;
+use bitcoin::util::bip143;
+use bitcoin::util::address::Address;
+use bitcoin::util::bip32::{ChildNumber, ExtendedPubKey, ExtendedPrivKey};
+use bitcoin::blockdata::block::{Block, BlockHeader};
+use bitcoin::blockdata::transaction::{Transaction, TxOut, TxIn, SigHashType, OutPoint as BitcoinOutPoint};
+use bitcoin::blockdata::script::{Builder, Script};
+use bitcoin::blockdata::opcodes;
+use bitcoin::blockdata::constants::genesis_block;
+use bitcoin::network::constants::Network;
+
+use bitcoin_hashes::sha256::Hash as Sha256;
+use bitcoin_hashes::Hash;
+
+use secp256k1::{Secp256k1, Message};
+use secp256k1::key::{PublicKey,SecretKey};
+
+use std::collections::{BTreeSet, HashMap, HashSet};
+use std::default::Default;
+use std::sync::{Arc, Mutex};
+use std::sync::atomic::Ordering;
+use std::mem;
+
+use rand::{thread_rng, Rng};
+
+use ln::functional_test_utils::*;
+
+#[test]
+fn test_insane_channel_opens() {
+ // Stand up a network of 2 nodes
+ let nodes = create_network(2, &[None, None]);
+
+ // Instantiate channel parameters where we push the maximum msats given our
+ // funding satoshis
+ let channel_value_sat = 31337; // same as funding satoshis
+ let channel_reserve_satoshis = Channel::get_our_channel_reserve_satoshis(channel_value_sat);
+ let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
+
+ // Have node0 initiate a channel to node1 with aforementioned parameters
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_sat, push_msat, 42).unwrap();
+
+ // Extract the channel open message from node0 to node1
+ let open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+ // Test helper that asserts we get the correct error string given a mutator
+ // that supposedly makes the channel open message insane
+ let insane_open_helper = |expected_error_str, message_mutator: fn(msgs::OpenChannel) -> msgs::OpenChannel| {
+ match nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &message_mutator(open_channel_message.clone())) {
+ Err(msgs::LightningError{ err: error_str, action: msgs::ErrorAction::SendErrorMessage {..}}) => {
+ assert_eq!(error_str, expected_error_str, "unexpected LightningError string (expected `{}`, actual `{}`)", expected_error_str, error_str)
+ },
+ Err(msgs::LightningError{..}) => {panic!("unexpected LightningError action")},
+ _ => panic!("insane OpenChannel message was somehow Ok"),
+ }
+ };
+
+ use ln::channel::MAX_FUNDING_SATOSHIS;
+ use ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;
+
+ // Test all mutations that would make the channel open message insane
+ insane_open_helper("funding value > 2^24", |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg });
+
+ insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg });
+
+ insane_open_helper("push_msat larger than funding value", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg });
+
+ insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg });
+
+ insane_open_helper("Bogus; channel reserve is less than dust limit", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg });
+
+ insane_open_helper("Minimum htlc value is full channel value", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg });
+
+ insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg });
+
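+ // Note: "max_accpted_htlcs" (sic) in the two expected strings below matches the misspelled
+ // error text emitted by the channel implementation, so the typo is intentional here.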
+ insane_open_helper("0 max_accpted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg });
+
+ insane_open_helper("max_accpted_htlcs > 483", |mut msg| { msg.max_accepted_htlcs = 484; msg });
+}
+
+#[test]
+fn test_async_inbound_update_fee() {
+ let mut nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let channel_id = chan.2;
+
+ // balancing
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+
+ // A B
+ // update_fee ->
+ // send (1) commitment_signed -.
+ // <- update_add_htlc/commitment_signed
+ // send (2) RAA (awaiting remote revoke) -.
+ // (1) commitment_signed is delivered ->
+ // .- send (3) RAA (awaiting remote revoke)
+ // (2) RAA is delivered ->
+ // .- send (4) commitment_signed
+ // <- (3) RAA is delivered
+ // send (5) commitment_signed -.
+ // <- (4) commitment_signed is delivered
+ // send (6) RAA -.
+ // (5) commitment_signed is delivered ->
+ // <- RAA
+ // (6) RAA is delivered ->
+
+ // First nodes[0] generates an update_fee
+ nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_0.len(), 1);
+ let (update_msg, commitment_signed) = match events_0[0] { // (1)
+ MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+ (update_fee.as_ref(), commitment_signed)
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
+
+ // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ let payment_event = {
+ let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_1.len(), 1);
+ SendEvent::from_event(events_1.remove(0))
+ };
+ assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
+ assert_eq!(payment_event.msgs.len(), 1);
+
+ // ...now when the messages get delivered everyone should be happy
+ nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2)
+ let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[0], 1);
+
+ // deliver(1), generate (3):
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
+ let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ // nodes[1] is awaiting nodes[0] revoke_and_ack so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap(); // deliver (2)
+ let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(bs_update.update_add_htlcs.is_empty()); // (4)
+ assert!(bs_update.update_fulfill_htlcs.is_empty()); // (4)
+ assert!(bs_update.update_fail_htlcs.is_empty()); // (4)
+ assert!(bs_update.update_fail_malformed_htlcs.is_empty()); // (4)
+ assert!(bs_update.update_fee.is_none()); // (4)
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap(); // deliver (3)
+ let as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ assert!(as_update.update_add_htlcs.is_empty()); // (5)
+ assert!(as_update.update_fulfill_htlcs.is_empty()); // (5)
+ assert!(as_update.update_fail_htlcs.is_empty()); // (5)
+ assert!(as_update.update_fail_malformed_htlcs.is_empty()); // (5)
+ assert!(as_update.update_fee.is_none()); // (5)
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_update.commitment_signed).unwrap(); // deliver (4)
+ let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // only (6) so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update.commitment_signed).unwrap(); // deliver (5)
+ let bs_second_revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let events_2 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_2.len(), 1);
+ match events_2[0] {
+ Event::PendingHTLCsForwardable {..} => {}, // If we actually processed we'd receive the payment
+ _ => panic!("Unexpected event"),
+ }
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap(); // deliver (6)
+ check_added_monitors!(nodes[1], 1);
+}
+
+#[test]
+fn test_update_fee_unordered_raa() {
+ // Just the intro to the previous test followed by an out-of-order RAA (which caused a
+ // crash in an earlier version of the update_fee patch)
+ let mut nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let channel_id = chan.2;
+
+ // balancing
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+
+ // First nodes[0] generates an update_fee
+ nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_0.len(), 1);
+ let update_msg = match events_0[0] { // (1)
+ MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, .. }, .. } => {
+ update_fee.as_ref()
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
+
+ // ...but before it's delivered, nodes[1] starts to send a payment back to nodes[0]...
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[1].node.send_payment(nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap(), our_payment_hash).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ let payment_event = {
+ let mut events_1 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_1.len(), 1);
+ SendEvent::from_event(events_1.remove(0))
+ };
+ assert_eq!(payment_event.node_id, nodes[0].node.get_our_node_id());
+ assert_eq!(payment_event.msgs.len(), 1);
+
+ // ...now when the messages get delivered everyone should be happy
+ nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap(); // (2)
+ let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // nodes[0] is awaiting nodes[1] revoke_and_ack so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap(); // deliver (2)
+ check_added_monitors!(nodes[1], 1);
+
+ // We can't continue, sadly, because our (1) now has a bogus signature
+}
+
+#[test]
+fn test_multi_flight_update_fee() {
+ let nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let channel_id = chan.2;
+
+ // A B
+ // update_fee/commitment_signed ->
+ // .- send (1) RAA and (2) commitment_signed
+ // update_fee (never committed) ->
+ // (3) update_fee ->
+ // We have to manually generate the above update_fee; it is allowed by the protocol, but we
+ // don't track which updates correspond to which revoke_and_ack responses, so we're in
+ // AwaitingRAA mode and will not generate the update_fee yet.
+ // <- (1) RAA delivered
+ // (3) is generated and send (4) CS -.
+ // Note that A cannot generate (4) prior to (1) being delivered as it otherwise doesn't
+ // know the per_commitment_point to use for it.
+ // <- (2) commitment_signed delivered
+ // revoke_and_ack ->
+ // B should send no response here
+ // (4) commitment_signed delivered ->
+ // <- RAA/commitment_signed delivered
+ // revoke_and_ack ->
+
+ // First nodes[0] generates an update_fee
+ let initial_feerate = get_feerate!(nodes[0], channel_id);
+ nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_0.len(), 1);
+ let (update_msg_1, commitment_signed_1) = match events_0[0] { // (1)
+ MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+ (update_fee.as_ref().unwrap(), commitment_signed)
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ // Deliver first update_fee/commitment_signed pair, generating (1) and (2):
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg_1).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed_1).unwrap();
+ let (bs_revoke_msg, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ check_added_monitors!(nodes[1], 1);
+
+ // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment
+ // transaction:
+ nodes[0].node.update_fee(channel_id, initial_feerate + 40).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+ // Create the (3) update_fee message that nodes[0] will generate before it does...
+ let mut update_msg_2 = msgs::UpdateFee {
+ channel_id: update_msg_1.channel_id.clone(),
+ feerate_per_kw: (initial_feerate + 30) as u32,
+ };
+
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
+
+ update_msg_2.feerate_per_kw = (initial_feerate + 40) as u32;
+ // Deliver (3)
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg_2).unwrap();
+
+ // Deliver (1), generating (3) and (4)
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_msg).unwrap();
+ let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ check_added_monitors!(nodes[0], 1);
+ assert!(as_second_update.update_add_htlcs.is_empty());
+ assert!(as_second_update.update_fulfill_htlcs.is_empty());
+ assert!(as_second_update.update_fail_htlcs.is_empty());
+ assert!(as_second_update.update_fail_malformed_htlcs.is_empty());
+ // Check that the update_fee newly generated matches what we delivered:
+ assert_eq!(as_second_update.update_fee.as_ref().unwrap().channel_id, update_msg_2.channel_id);
+ assert_eq!(as_second_update.update_fee.as_ref().unwrap().feerate_per_kw, update_msg_2.feerate_per_kw);
+
+ // Deliver (2) commitment_signed
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed).unwrap();
+ let as_revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ check_added_monitors!(nodes[0], 1);
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_msg).unwrap();
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[1], 1);
+
+ // Deliver (4)
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed).unwrap();
+ let (bs_second_revoke, bs_second_commitment) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment).unwrap();
+ let as_second_revoke = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_revoke).unwrap();
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[1], 1);
+}
+
+#[test]
+fn test_update_fee_vanilla() {
+ let nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let channel_id = chan.2;
+
+ let feerate = get_feerate!(nodes[0], channel_id);
+ nodes[0].node.update_fee(channel_id, feerate+25).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_0.len(), 1);
+ let (update_msg, commitment_signed) = match events_0[0] {
+ MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
+ (update_fee.as_ref(), commitment_signed)
+ },
+ _ => panic!("Unexpected event"),
+ };
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
+
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
+ let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
+ let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[1], 1);
+}
+
+#[test]
+fn test_update_fee_that_funder_cannot_afford() {
+ let nodes = create_network(2, &[None, None]);
+ let channel_value = 1888;
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 700000, LocalFeatures::new(), LocalFeatures::new());
+ let channel_id = chan.2;
+
+ let feerate = 260;
+ nodes[0].node.update_fee(channel_id, feerate).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update_msg.update_fee.unwrap()).unwrap();
+
+ commitment_signed_dance!(nodes[1], nodes[0], update_msg.commitment_signed, false);
+
+ //Confirm that the fee on the last local commitment txn is what we expected given the feerate of 260 set above.
+ //This value results in a fee that is exactly what the funder can afford (277 sat + 1000 sat channel reserve)
+ {
+ let chan_lock = nodes[1].node.channel_state.lock().unwrap();
+ let chan = chan_lock.by_id.get(&channel_id).unwrap();
+
+ //We made sure neither party's funds are below the dust limit, so subtract the 2 non-HTLC outputs from the output count
+ let num_htlcs = chan.last_local_commitment_txn[0].output.len() - 2;
+ let total_fee: u64 = feerate * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
+ let mut actual_fee = chan.last_local_commitment_txn[0].output.iter().fold(0, |acc, output| acc + output.value);
+ actual_fee = channel_value - actual_fee;
+ assert_eq!(total_fee, actual_fee);
+ } //drop the mutex
+
+ //Add 2 to the previous fee rate so the final fee increases by 1 (with no HTLCs the fee is
+ //essentially feerate*(724/1000), so a +1 increment of 0.724 sat would round back down to 0)
+ nodes[0].node.update_fee(channel_id, feerate+2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let update2_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), &update2_msg.update_fee.unwrap()).unwrap();
+
+ //While producing the commitment_signed response after handling a received update_fee request,
+ //the check that the funder (who sent the update_fee request) can afford the new fee
+ //(funder_balance >= fee + channel_reserve) should produce an error.
+ let err = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update2_msg.commitment_signed).unwrap_err();
+
+ assert!(match err.err {
+ "Funding remote cannot afford proposed new fee" => true,
+ _ => false,
+ });
+
+ //clear the message we could not handle
+ nodes[1].node.get_and_clear_pending_msg_events();
+}
+
+#[test]
+fn test_update_fee_with_fundee_update_add_htlc() {
+ let mut nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let channel_id = chan.2;
+
+ // balancing
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+
+ let feerate = get_feerate!(nodes[0], channel_id);
+ nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_0.len(), 1);
+ let (update_msg, commitment_signed) = match events_0[0] {
+ MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
+ (update_fee.as_ref(), commitment_signed)
+ },
+ _ => panic!("Unexpected event"),
+ };
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
+ let (revoke_msg, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ check_added_monitors!(nodes[1], 1);
+
+ let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 800000, TEST_FINAL_CLTV).unwrap();
+
+ let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[1]);
+
+	// nothing happens since nodes[1] is in AwaitingRemoteRevoke
+ nodes[1].node.send_payment(route, our_payment_hash).unwrap();
+ {
+ let mut added_monitors = nodes[0].chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 0);
+ added_monitors.clear();
+ }
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+	// nodes[1] has nothing to do
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
+ let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[0], 1);
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ // AwaitingRemoteRevoke ends here
+
+ let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert_eq!(commitment_update.update_add_htlcs.len(), 1);
+ assert_eq!(commitment_update.update_fulfill_htlcs.len(), 0);
+ assert_eq!(commitment_update.update_fail_htlcs.len(), 0);
+ assert_eq!(commitment_update.update_fail_malformed_htlcs.len(), 0);
+ assert_eq!(commitment_update.update_fee.is_none(), true);
+
+ nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &commitment_update.update_add_htlcs[0]).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let (revoke, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let revoke = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+ expect_pending_htlcs_forwardable!(nodes[0]);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentReceived { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+
+ claim_payment(&nodes[1], &vec!(&nodes[0])[..], our_payment_preimage);
+
+ send_payment(&nodes[1], &vec!(&nodes[0])[..], 800000);
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 800000);
+ close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+}
+
+#[test]
+fn test_update_fee() {
+ let nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let channel_id = chan.2;
+
+ // A B
+ // (1) update_fee/commitment_signed ->
+ // <- (2) revoke_and_ack
+ // .- send (3) commitment_signed
+ // (4) update_fee/commitment_signed ->
+ // .- send (5) revoke_and_ack (no CS as we're awaiting a revoke)
+ // <- (3) commitment_signed delivered
+ // send (6) revoke_and_ack -.
+ // <- (5) deliver revoke_and_ack
+ // (6) deliver revoke_and_ack ->
+ // .- send (7) commitment_signed in response to (4)
+ // <- (7) deliver commitment_signed
+ // revoke_and_ack ->
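+	// Note the interleaving above: (4) is sent before (3) has been delivered, so B can only
+	// reply to (4) with a revoke_and_ack (5) and must defer its commitment_signed (7) until
+	// A's revoke_and_ack (6) arrives. The test below delivers the messages in this order and
+	// checks that neither side emits a commitment_signed before the revoke it is waiting on.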
+
+ // Create and deliver (1)...
+ let feerate = get_feerate!(nodes[0], channel_id);
+ nodes[0].node.update_fee(channel_id, feerate+20).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_0.len(), 1);
+ let (update_msg, commitment_signed) = match events_0[0] {
+ MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
+ (update_fee.as_ref(), commitment_signed)
+ },
+ _ => panic!("Unexpected event"),
+ };
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
+
+ // Generate (2) and (3):
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
+ let (revoke_msg, commitment_signed_0) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ check_added_monitors!(nodes[1], 1);
+
+ // Deliver (2):
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[0], 1);
+
+ // Create and deliver (4)...
+ nodes[0].node.update_fee(channel_id, feerate+30).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let events_0 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_0.len(), 1);
+ let (update_msg, commitment_signed) = match events_0[0] {
+ MessageSendEvent::UpdateHTLCs { node_id:_, updates: msgs::CommitmentUpdate { update_add_htlcs:_, update_fulfill_htlcs:_, update_fail_htlcs:_, update_fail_malformed_htlcs:_, ref update_fee, ref commitment_signed } } => {
+ (update_fee.as_ref(), commitment_signed)
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap()).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ // ... creating (5)
+ let revoke_msg = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+
+ // Handle (3), creating (6):
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_0).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let revoke_msg_0 = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+
+ // Deliver (5):
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_msg).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[0], 1);
+
+ // Deliver (6), creating (7):
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg_0).unwrap();
+ let commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(commitment_update.update_add_htlcs.is_empty());
+ assert!(commitment_update.update_fulfill_htlcs.is_empty());
+ assert!(commitment_update.update_fail_htlcs.is_empty());
+ assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
+ assert!(commitment_update.update_fee.is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ // Deliver (7)
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let revoke_msg = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &revoke_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ assert_eq!(get_feerate!(nodes[0], channel_id), feerate + 30);
+ assert_eq!(get_feerate!(nodes[1], channel_id), feerate + 30);
+ close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true);
+}
+
+#[test]
+fn pre_funding_lock_shutdown_test() {
+ // Test sending a shutdown prior to funding_locked after funding generation
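+	// The funding transaction is confirmed below, but funding_locked is never exchanged, so
+	// the cooperative close can proceed with no commitment updates in flight.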
+ let nodes = create_network(2, &[None, None]);
+ let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 8000000, 0, LocalFeatures::new(), LocalFeatures::new());
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);
+ nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx; 1], &[1; 1]);
+
+ nodes[0].node.close_channel(&OutPoint::new(tx.txid(), 0).to_channel_id()).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
+
+ let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
+ let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
+ let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ assert!(node_0_none.is_none());
+
+ assert!(nodes[0].node.list_channels().is_empty());
+ assert!(nodes[1].node.list_channels().is_empty());
+}
+
+#[test]
+fn updates_shutdown_wait() {
+ // Test sending a shutdown with outstanding updates pending
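+	// An HTLC is routed 0 -> 1 -> 2 before shutdown begins on chan_1; new sends must then
+	// fail, but the in-flight HTLC's fulfill must still propagate back to nodes[0] before the
+	// closing_signed exchange completes.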
+ let mut nodes = create_network(3, &[None, None, None]);
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+ let route_1 = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+ let route_2 = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+
+ let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
+
+ nodes[0].node.close_channel(&chan_1.2).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
+
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ if let Err(APIError::ChannelUnavailable {..}) = nodes[0].node.send_payment(route_1, payment_hash) {}
+ else { panic!("New sends should fail!") };
+ if let Err(APIError::ChannelUnavailable {..}) = nodes[1].node.send_payment(route_2, payment_hash) {}
+ else { panic!("New sends should fail!") };
+
+ assert!(nodes[2].node.claim_funds(our_payment_preimage));
+ check_added_monitors!(nodes[2], 1);
+ let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+ assert!(updates_2.update_add_htlcs.is_empty());
+ assert!(updates_2.update_fail_htlcs.is_empty());
+ assert!(updates_2.update_fail_malformed_htlcs.is_empty());
+ assert!(updates_2.update_fee.is_none());
+ assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(our_payment_preimage, *payment_preimage);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
+ let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
+ let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ assert!(node_0_none.is_none());
+
+ assert!(nodes[0].node.list_channels().is_empty());
+
+ assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+ nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+ close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
+ assert!(nodes[1].node.list_channels().is_empty());
+ assert!(nodes[2].node.list_channels().is_empty());
+}
+
+#[test]
+fn htlc_fail_async_shutdown() {
+ // Test HTLCs fail if shutdown starts even if messages are delivered out-of-order
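+	// nodes[1] initiates shutdown while nodes[0]'s update_add_htlc is still in flight. Rather
+	// than forwarding the HTLC to nodes[2], nodes[1] should fail it back to nodes[0] and still
+	// complete the cooperative close.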
+ let mut nodes = create_network(3, &[None, None, None]);
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ assert_eq!(updates.update_add_htlcs.len(), 1);
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+
+ nodes[1].node.close_channel(&chan_1.2).unwrap();
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
+
+ let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(updates_2.update_add_htlcs.is_empty());
+ assert!(updates_2.update_fulfill_htlcs.is_empty());
+ assert_eq!(updates_2.update_fail_htlcs.len(), 1);
+ assert!(updates_2.update_fail_malformed_htlcs.is_empty());
+ assert!(updates_2.update_fee.is_none());
+
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fail_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, .. } => {
+ assert_eq!(our_payment_hash, *payment_hash);
+ assert!(!rejected_by_dest);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 2);
+ let node_0_closing_signed = match msg_events[0] {
+ MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ (*msg).clone()
+ },
+ _ => panic!("Unexpected event"),
+ };
+ match msg_events[1] {
+ MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
+ assert_eq!(msg.contents.short_channel_id, chan_1.0.contents.short_channel_id);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
+ let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
+ let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ assert!(node_0_none.is_none());
+
+ assert!(nodes[0].node.list_channels().is_empty());
+
+ assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+ nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+ close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
+ assert!(nodes[1].node.list_channels().is_empty());
+ assert!(nodes[2].node.list_channels().is_empty());
+}
+
+fn do_test_shutdown_rebroadcast(recv_count: u8) {
+ // Test that shutdown/closing_signed is re-sent on reconnect with a variable number of
+ // messages delivered prior to disconnect
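+	// recv_count denotes how many of the initial shutdown messages were delivered before the
+	// disconnect: 0 = neither shutdown received, 1 = only nodes[0] received nodes[1]'s
+	// shutdown, 2 = both shutdowns received (see test_shutdown_rebroadcast below).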
+ let nodes = create_network(3, &[None, None, None]);
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100000);
+
+ nodes[1].node.close_channel(&chan_1.2).unwrap();
+ let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ if recv_count > 0 {
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ if recv_count > 1 {
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown).unwrap();
+ }
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ let node_0_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+ let node_1_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_reestablish).unwrap();
+ let node_1_2nd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ assert!(node_1_shutdown == node_1_2nd_shutdown);
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish).unwrap();
+ let node_0_2nd_shutdown = if recv_count > 0 {
+ let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap();
+ node_0_2nd_shutdown
+ } else {
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown).unwrap();
+ get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
+ };
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_2nd_shutdown).unwrap();
+
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ assert!(nodes[2].node.claim_funds(our_payment_preimage));
+ check_added_monitors!(nodes[2], 1);
+ let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
+
+ assert!(updates_2.update_add_htlcs.is_empty());
+ assert!(updates_2.update_fail_htlcs.is_empty());
+ assert!(updates_2.update_fail_malformed_htlcs.is_empty());
+ assert!(updates_2.update_fee.is_none());
+ assert_eq!(updates_2.update_fulfill_htlcs.len(), 1);
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates_2.update_fulfill_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], updates_2.commitment_signed, false, true);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(our_payment_preimage, *payment_preimage);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ if recv_count > 0 {
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed).unwrap();
+ let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ assert!(node_1_closing_signed.is_some());
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ let node_0_2nd_reestablish = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+ if recv_count == 0 {
+		// If our closing_signed was never delivered we can just resume where we left off...
+ let node_1_2nd_reestablish = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_2nd_reestablish).unwrap();
+ let node_0_3rd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ assert!(node_0_2nd_shutdown == node_0_3rd_shutdown);
+
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish).unwrap();
+ let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ assert!(node_1_3rd_shutdown == node_1_2nd_shutdown);
+
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_3rd_shutdown).unwrap();
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_3rd_shutdown).unwrap();
+ let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
+ assert!(node_0_closing_signed == node_0_2nd_closing_signed);
+
+ nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed).unwrap();
+ let (_, node_1_closing_signed) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed.unwrap()).unwrap();
+ let (_, node_0_none) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+ assert!(node_0_none.is_none());
+ } else {
+		// If, however, one node received and responded with an identical closing_signed, we end
+		// up erroring and nodes[0] will try to broadcast its own latest commitment transaction.
+		// There isn't really a simple way to do better, but in the future we might explore
+		// storing a set of recently-closed channels that got disconnected during closing_signed
+		// and avoiding broadcasting local commitment txn for some timeout to give our
+		// counterparty enough time to (potentially) broadcast a cooperative closing
+		// transaction.
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
-		if let Err(msgs::HandleError { action: Some(msgs::ErrorAction::SendErrorMessage { msg }), .. }) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) {
++ if let Err(msgs::LightningError{action: msgs::ErrorAction::SendErrorMessage{msg}, ..}) =
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &node_0_2nd_reestablish) {
+ nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msg);
+ let msgs::ErrorMessage {ref channel_id, ..} = msg;
+ assert_eq!(*channel_id, chan_1.2);
+ } else { panic!("Needed SendErrorMessage close"); }
+
+ // get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and
+ // checks it, but in this case nodes[0] didn't ever get a chance to receive a
+ // closing_signed so we do it ourselves
+ check_closed_broadcast!(nodes[0]);
+ }
+
+ assert!(nodes[0].node.list_channels().is_empty());
+
+ assert_eq!(nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1);
+ nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clear();
+ close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, true);
+ assert!(nodes[1].node.list_channels().is_empty());
+ assert!(nodes[2].node.list_channels().is_empty());
+}
+
+#[test]
+fn test_shutdown_rebroadcast() {
+ do_test_shutdown_rebroadcast(0);
+ do_test_shutdown_rebroadcast(1);
+ do_test_shutdown_rebroadcast(2);
+}
+
+#[test]
+fn fake_network_test() {
+ // Simple test which builds a network of ChannelManagers, connects them to each other, and
+ // tests that payments get routed and transactions broadcast in semi-reasonable ways.
+ let nodes = create_network(4, &[None, None, None, None]);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+ let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance the network a bit by relaying one payment through all the channels...
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 8000000);
+
+ // Send some more payments
+ send_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 1000000);
+ send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1], &nodes[0])[..], 1000000);
+ send_payment(&nodes[3], &vec!(&nodes[2], &nodes[1])[..], 1000000);
+
+ // Test failure packets
+ let payment_hash_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], 1000000).1;
+ fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3])[..], payment_hash_1);
+
+	// Add a new channel between nodes[1] and nodes[3], skipping nodes[2]
+ let chan_4 = create_announced_chan_between_nodes(&nodes, 1, 3, LocalFeatures::new(), LocalFeatures::new());
+
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 1000000);
+ send_payment(&nodes[2], &vec!(&nodes[3])[..], 1000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+ send_payment(&nodes[1], &vec!(&nodes[3])[..], 8000000);
+
+ // Do some rebalance loop payments, simultaneously
+ let mut hops = Vec::with_capacity(3);
+ hops.push(RouteHop {
+ pubkey: nodes[2].node.get_our_node_id(),
+ short_channel_id: chan_2.0.contents.short_channel_id,
+ fee_msat: 0,
+ cltv_expiry_delta: chan_3.0.contents.cltv_expiry_delta as u32
+ });
+ hops.push(RouteHop {
+ pubkey: nodes[3].node.get_our_node_id(),
+ short_channel_id: chan_3.0.contents.short_channel_id,
+ fee_msat: 0,
+ cltv_expiry_delta: chan_4.1.contents.cltv_expiry_delta as u32
+ });
+ hops.push(RouteHop {
+ pubkey: nodes[1].node.get_our_node_id(),
+ short_channel_id: chan_4.0.contents.short_channel_id,
+ fee_msat: 1000000,
+ cltv_expiry_delta: TEST_FINAL_CLTV,
+ });
+ hops[1].fee_msat = chan_4.1.contents.fee_base_msat as u64 + chan_4.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
+ hops[0].fee_msat = chan_3.0.contents.fee_base_msat as u64 + chan_3.0.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
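+	// The hop fees above are filled in back-to-front: the final hop's fee_msat is the amount
+	// the recipient receives, and each earlier hop charges
+	// fee_base_msat + forwarded_amount * fee_proportional_millionths / 1000000 for forwarding
+	// the amount due at the next hop.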
+ let payment_preimage_1 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[2], &nodes[3], &nodes[1])[..], 1000000).0;
+
+ let mut hops = Vec::with_capacity(3);
+ hops.push(RouteHop {
+ pubkey: nodes[3].node.get_our_node_id(),
+ short_channel_id: chan_4.0.contents.short_channel_id,
+ fee_msat: 0,
+ cltv_expiry_delta: chan_3.1.contents.cltv_expiry_delta as u32
+ });
+ hops.push(RouteHop {
+ pubkey: nodes[2].node.get_our_node_id(),
+ short_channel_id: chan_3.0.contents.short_channel_id,
+ fee_msat: 0,
+ cltv_expiry_delta: chan_2.1.contents.cltv_expiry_delta as u32
+ });
+ hops.push(RouteHop {
+ pubkey: nodes[1].node.get_our_node_id(),
+ short_channel_id: chan_2.0.contents.short_channel_id,
+ fee_msat: 1000000,
+ cltv_expiry_delta: TEST_FINAL_CLTV,
+ });
+ hops[1].fee_msat = chan_2.1.contents.fee_base_msat as u64 + chan_2.1.contents.fee_proportional_millionths as u64 * hops[2].fee_msat as u64 / 1000000;
+ hops[0].fee_msat = chan_3.1.contents.fee_base_msat as u64 + chan_3.1.contents.fee_proportional_millionths as u64 * hops[1].fee_msat as u64 / 1000000;
+ let payment_hash_2 = send_along_route(&nodes[1], Route { hops }, &vec!(&nodes[3], &nodes[2], &nodes[1])[..], 1000000).1;
+
+ // Claim the rebalances...
+ fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
+ claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
+
+	// Add a second, duplicate channel between nodes[1] and nodes[3]
+ let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3, LocalFeatures::new(), LocalFeatures::new());
+
+ // Send some payments across both channels
+ let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
+ let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
+ let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
+
+ route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
+
+ //TODO: Test that routes work again here as we've been notified that the channel is full
+
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
+
+ // Close down the channels...
+ close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
+ close_channel(&nodes[1], &nodes[2], &chan_2.2, chan_2.3, false);
+ close_channel(&nodes[2], &nodes[3], &chan_3.2, chan_3.3, true);
+ close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
+ close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
+}
+
+#[test]
+fn holding_cell_htlc_counting() {
+ // Tests that HTLCs in the holding cell count towards the pending HTLC limits on outbound HTLCs
+ // to ensure we don't end up with HTLCs sitting around in our holding cell for several
+ // commitment dance rounds.
+ let mut nodes = create_network(3, &[None, None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ let mut payments = Vec::new();
+ for _ in 0..::ln::channel::OUR_MAX_HTLCS {
+ let route = nodes[1].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage, payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[1].node.send_payment(route, payment_hash).unwrap();
+ payments.push((payment_preimage, payment_hash));
+ }
+ check_added_monitors!(nodes[1], 1);
+
+ let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let initial_payment_event = SendEvent::from_event(events.pop().unwrap());
+ assert_eq!(initial_payment_event.node_id, nodes[2].node.get_our_node_id());
+
+ // There is now one HTLC in an outbound commitment transaction and (OUR_MAX_HTLCS - 1) HTLCs in
+ // the holding cell waiting on B's RAA to send. At this point we should not be able to add
+ // another HTLC.
+ let route = nodes[1].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap();
+ let (_, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+ if let APIError::ChannelUnavailable { err } = nodes[1].node.send_payment(route, payment_hash_1).unwrap_err() {
+ assert_eq!(err, "Cannot push more than their max accepted HTLCs");
+ } else { panic!("Unexpected event"); }
+
+ // This should also be true if we try to forward a payment.
+ let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap();
+ let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let payment_event = SendEvent::from_event(events.pop().unwrap());
+ assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+	// We have to forward pending HTLCs twice - the first pass tries to forward the payment
+	// onward (and fails), the second processes the resulting failure and fails the HTLC backward.
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_fail_updates.update_fail_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], bs_fail_updates.commitment_signed, false, true);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg }} => {
+ assert_eq!(msg.contents.short_channel_id, chan_2.0.contents.short_channel_id);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
+ assert_eq!(payment_hash, payment_hash_2);
+ assert!(!rejected_by_dest);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ // Now forward all the pending HTLCs and claim them back
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &initial_payment_event.msgs[0]).unwrap();
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &initial_payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[2], 1);
+
+ let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let as_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+
+ nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+
+ for ref update in as_updates.update_add_htlcs.iter() {
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), update).unwrap();
+ }
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_updates.commitment_signed).unwrap();
+ check_added_monitors!(nodes[2], 1);
+ nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[2], 1);
+ let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &bs_commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let as_final_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+
+ nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_final_raa).unwrap();
+ check_added_monitors!(nodes[2], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[2]);
+
+ let events = nodes[2].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), payments.len());
+ for (event, &(_, ref hash)) in events.iter().zip(payments.iter()) {
+ match event {
+ &Event::PaymentReceived { ref payment_hash, .. } => {
+ assert_eq!(*payment_hash, *hash);
+ },
+ _ => panic!("Unexpected event"),
+ };
+ }
+
+ for (preimage, _) in payments.drain(..) {
+ claim_payment(&nodes[1], &[&nodes[2]], preimage);
+ }
+
+ send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000);
+}
+
+#[test]
+fn duplicate_htlc_test() {
+ // Test that we accept duplicate payment_hash HTLCs across the network and that
+ // claiming/failing them are all separate and don't affect each other
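+	// Rewinding network_payment_count below forces route_payment to re-derive the same
+	// preimage/hash, giving three concurrent HTLCs with an identical payment_hash.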
+ let mut nodes = create_network(6, &[None, None, None, None, None, None]);
+
+ // Create some initial channels to route via 3 to 4/5 from 0/1/2
+ create_announced_chan_between_nodes(&nodes, 0, 3, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 1, 3, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 2, 3, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 3, 4, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 3, 5, LocalFeatures::new(), LocalFeatures::new());
+
+ let (payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], 1000000);
+
+ *nodes[0].network_payment_count.borrow_mut() -= 1;
+ assert_eq!(route_payment(&nodes[1], &vec!(&nodes[3])[..], 1000000).0, payment_preimage);
+
+ *nodes[0].network_payment_count.borrow_mut() -= 1;
+ assert_eq!(route_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], 1000000).0, payment_preimage);
+
+ claim_payment(&nodes[0], &vec!(&nodes[3], &nodes[4])[..], payment_preimage);
+ fail_payment(&nodes[2], &vec!(&nodes[3], &nodes[5])[..], payment_hash);
+ claim_payment(&nodes[1], &vec!(&nodes[3])[..], payment_preimage);
+}
+
++#[test]
++fn test_duplicate_htlc_different_direction_onchain() {
++ // Test that ChannelMonitor doesn't generate 2 preimage txn
++ // when we have 2 HTLCs with same preimage that go across a node
++ // in opposite directions.
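++	// Setup sketch: nodes[0] ends up with a 900k msat outbound HTLC to nodes[1] and an 800k
++	// msat inbound HTLC from nodes[1], both locked to the same payment_hash. When nodes[1]'s
++	// commitment tx hits the chain, nodes[0] should claim the inbound HTLC with the preimage
++	// and time out the outbound one, not generate two preimage claims.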
++ let nodes = create_network(2, &[None, None]);
++
++ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
++
++ // balancing
++ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
++
++ let (payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 900_000);
++
++ let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &Vec::new(), 800_000, TEST_FINAL_CLTV).unwrap();
++ send_along_route_with_hash(&nodes[1], route, &vec!(&nodes[0])[..], 800_000, payment_hash);
++
++ // Provide preimage to node 0 by claiming payment
++ nodes[0].node.claim_funds(payment_preimage);
++ check_added_monitors!(nodes[0], 1);
++
++ // Broadcast node 1 commitment txn
++ let remote_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
++
++ assert_eq!(remote_txn[0].output.len(), 4); // 1 local, 1 remote, 1 htlc inbound, 1 htlc outbound
++ let mut has_both_htlcs = 0; // check htlcs match ones committed
++ for outp in remote_txn[0].output.iter() {
++ if outp.value == 800_000 / 1000 {
++ has_both_htlcs += 1;
++ } else if outp.value == 900_000 / 1000 {
++ has_both_htlcs += 1;
++ }
++ }
++ assert_eq!(has_both_htlcs, 2);
++
++ let header = BlockHeader { version: 0x2000_0000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
++
++ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![remote_txn[0].clone()] }, 1);
++
++ // Check we only broadcast 1 timeout tx
++ let claim_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
++ let htlc_pair = if claim_txn[0].output[0].value == 800_000 / 1000 { (claim_txn[0].clone(), claim_txn[1].clone()) } else { (claim_txn[1].clone(), claim_txn[0].clone()) };
++ assert_eq!(claim_txn.len(), 6);
++ assert_eq!(htlc_pair.0.input.len(), 1);
++ assert_eq!(htlc_pair.0.input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC 1 <--> 0, preimage tx
++ check_spends!(htlc_pair.0, remote_txn[0].clone());
++ assert_eq!(htlc_pair.1.input.len(), 1);
++ assert_eq!(htlc_pair.1.input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // HTLC 0 <--> 1, timeout tx
++ check_spends!(htlc_pair.1, remote_txn[0].clone());
++
++ let events = nodes[0].node.get_and_clear_pending_msg_events();
++ assert_eq!(events.len(), 2);
++ for e in events {
++ match e {
++ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
++ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
++ assert!(update_add_htlcs.is_empty());
++ assert!(update_fail_htlcs.is_empty());
++ assert_eq!(update_fulfill_htlcs.len(), 1);
++ assert!(update_fail_malformed_htlcs.is_empty());
++ assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
++ },
++ _ => panic!("Unexpected event"),
++ }
++ }
++}
++
+fn do_channel_reserve_test(test_recv: bool) {
-	use ln::msgs::HandleError;
++ use ln::msgs::LightningError;
+
+ let mut nodes = create_network(3, &[None, None, None]);
+ let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1900, 1001, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1900, 1001, LocalFeatures::new(), LocalFeatures::new());
+
+ let mut stat01 = get_channel_value_stat!(nodes[0], chan_1.2);
+ let mut stat11 = get_channel_value_stat!(nodes[1], chan_1.2);
+
+ let mut stat12 = get_channel_value_stat!(nodes[1], chan_2.2);
+ let mut stat22 = get_channel_value_stat!(nodes[2], chan_2.2);
+
+ macro_rules! get_route_and_payment_hash {
+ ($recv_value: expr) => {{
+ let route = nodes[0].router.get_route(&nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage, payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ (route, payment_hash, payment_preimage)
+ }}
+ };
+
+ macro_rules! expect_forward {
+ ($node: expr) => {{
+ let mut events = $node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ check_added_monitors!($node, 1);
+ let payment_event = SendEvent::from_event(events.remove(0));
+ payment_event
+ }}
+ }
+
+ let feemsat = 239; // somehow we know?
+ let total_fee_msat = (nodes.len() - 2) as u64 * 239;
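+	// 239 msat is the per-hop routing fee the router computes for these channels (pinned by
+	// the route assertion below); total_fee_msat is one such fee per intermediate hop.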
+
+ let recv_value_0 = stat01.their_max_htlc_value_in_flight_msat - total_fee_msat;
+
+ // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
+ {
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1);
+ assert!(route.hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
+ let err = nodes[0].node.send_payment(route, our_payment_hash).err().unwrap();
+ match err {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept"),
+ _ => panic!("Unknown error variants"),
+ }
+ }
+
+ let mut htlc_id = 0;
+ // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
+ // nodes[0]'s wealth
+ loop {
+ let amt_msat = recv_value_0 + total_fee_msat;
+ if stat01.value_to_self_msat - amt_msat < stat01.channel_reserve_msat {
+ break;
+ }
+ send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
+ htlc_id += 1;
+
+ let (stat01_, stat11_, stat12_, stat22_) = (
+ get_channel_value_stat!(nodes[0], chan_1.2),
+ get_channel_value_stat!(nodes[1], chan_1.2),
+ get_channel_value_stat!(nodes[1], chan_2.2),
+ get_channel_value_stat!(nodes[2], chan_2.2),
+ );
+
+ assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
+ assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
+ assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
+ assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
+ stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
+ }
+
+ {
+ let recv_value = stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat;
+ // attempt to get channel_reserve violation
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value + 1);
+ let err = nodes[0].node.send_payment(route.clone(), our_payment_hash).err().unwrap();
+ match err {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
+ _ => panic!("Unknown error variants"),
+ }
+ }
+
+ // adding pending output
+ let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat)/2;
+ let amt_msat_1 = recv_value_1 + total_fee_msat;
+
+ let (route_1, our_payment_hash_1, our_payment_preimage_1) = get_route_and_payment_hash!(recv_value_1);
+ let payment_event_1 = {
+ nodes[0].node.send_payment(route_1, our_payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]).unwrap();
+
+ // channel reserve test with htlc pending output > 0
+ let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat;
+ {
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
+ match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
+ _ => panic!("Unknown error variants"),
+ }
+ }
+
+ {
+		// test the channel reserve on nodes[1]'s side
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
+
+ // Need to manually create update_add_htlc message to go around the channel reserve check in send_htlc()
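+		// (mirroring what send_payment does internally: a random session key, onion keys and
+		// payloads derived from the route, then a hand-built msgs::UpdateAddHTLC)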
+ let secp_ctx = Secp256k1::new();
+ let session_priv = SecretKey::from_slice(&{
+ let mut session_key = [0; 32];
+ let mut rng = thread_rng();
+ rng.fill_bytes(&mut session_key);
+ session_key
+ }).expect("RNG is bad!");
+
+ let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
+ let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+ let msg = msgs::UpdateAddHTLC {
+ channel_id: chan_1.2,
+ htlc_id,
+ amount_msat: htlc_msat,
+ payment_hash: our_payment_hash,
+ cltv_expiry: htlc_cltv,
+ onion_routing_packet: onion_packet,
+ };
+
+ if test_recv {
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap();
+ match err {
-			HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
++ LightningError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
+ }
+ // If we send a garbage message, the channel should get closed, making the rest of this test case fail.
+		assert_eq!(nodes[1].node.list_channels().len(), 1);
+ check_closed_broadcast!(nodes[1]);
+ return;
+ }
+ }
+
+ // split the rest to test holding cell
+ let recv_value_21 = recv_value_2/2;
+ let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat;
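+	// recv_value_21 and recv_value_22 (plus a fee each) are chosen to exactly exhaust
+	// nodes[0]'s spendable balance down to its reserve, as the assertion below checks.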
+ {
+ let stat = get_channel_value_stat!(nodes[0], chan_1.2);
+ assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat), stat.channel_reserve_msat);
+ }
+
+ // now see if they go through on both sides
+ let (route_21, our_payment_hash_21, our_payment_preimage_21) = get_route_and_payment_hash!(recv_value_21);
+	// but this one will get stuck in the holding cell
+ nodes[0].node.send_payment(route_21, our_payment_hash_21).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 0);
+
+ // test with outbound holding cell amount > 0
+ {
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
+ match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over their reserve value"),
+ _ => panic!("Unknown error variants"),
+ }
+ }
+
+ let (route_22, our_payment_hash_22, our_payment_preimage_22) = get_route_and_payment_hash!(recv_value_22);
+	// this will also get stuck in the holding cell
+ nodes[0].node.send_payment(route_22, our_payment_hash_22).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+ // flush the pending htlc
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg).unwrap();
+ let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let commitment_update_2 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed).unwrap();
+ let bs_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let ref payment_event_11 = expect_forward!(nodes[1]);
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_payment_received!(nodes[2], our_payment_hash_1, recv_value_1);
+
+ // flush the htlcs in the holding cell
+ assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]).unwrap();
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let ref payment_event_3 = expect_forward!(nodes[1]);
+ assert_eq!(payment_event_3.msgs.len(), 2);
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]).unwrap();
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]).unwrap();
+
+ commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(nodes[2]);
+
+ let events = nodes[2].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(our_payment_hash_21, *payment_hash);
+ assert_eq!(recv_value_21, amt);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(our_payment_hash_22, *payment_hash);
+ assert_eq!(recv_value_22, amt);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
+
+ let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat);
+ let stat0 = get_channel_value_stat!(nodes[0], chan_1.2);
+ assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
+ assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat);
+
+ let stat2 = get_channel_value_stat!(nodes[2], chan_2.2);
+ assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22);
+}
+
+#[test]
+fn channel_reserve_test() {
+ do_channel_reserve_test(false);
+ do_channel_reserve_test(true);
+}
+
+#[test]
+fn channel_reserve_in_flight_removes() {
+ // In cases where one side claims an HTLC, it thinks it has additional available funds that it
+ // can send to its counterparty, but due to update ordering, the other side may not yet have
+ // considered those HTLCs fully removed.
+ // This tests that we don't count HTLCs which will not be included in the next remote
+ // commitment transaction towards the reserve value (as that implies no commitment
+ // transaction which violates the remote reserve value will ever be generated).
+ // This was broken previously, and discovered by the chanmon_fail_consistency fuzz test.
+ // To test this we:
+ // * route two HTLCs from A to B (note that, at a high level, this test is checking that, when
+ // you consider the values of both of these HTLCs, B may not send an HTLC back to A, but if
+ // you only consider the value of the first HTLC, it may),
+ // * start routing a third HTLC from A to B,
+ // * claim the first two HTLCs (though B will generate an update_fulfill for one, and put
+ // the other claim in its holding cell, as it immediately goes into AwaitingRAA),
+ // * deliver the first fulfill from B
+ // * deliver the update_add and an RAA from A, resulting in B freeing the second holding cell
+ // claim,
+ // * deliver A's response CS and RAA.
+ // This results in A having the second HTLC in AwaitingRemovedRemoteRevoke, but B having
+ // removed it fully. B now has the push_msat plus the first two HTLCs in value.
+ // * Now B happily sends another HTLC, potentially violating its reserve value from A's point
+ // of view (if A counts the AwaitingRemovedRemoteRevoke HTLC).
+ let mut nodes = create_network(2, &[None, None]);
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let b_chan_values = get_channel_value_stat!(nodes[1], chan_1.2);
+ // Route the first two HTLCs.
+ let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], b_chan_values.channel_reserve_msat - b_chan_values.value_to_self_msat - 10000);
+ let (payment_preimage_2, _) = route_payment(&nodes[0], &[&nodes[1]], 20000);
+
+ // Start routing the third HTLC (this is just used to get everyone in the right state).
+ let (payment_preimage_3, payment_hash_3) = get_payment_preimage_hash!(nodes[0]);
+ let send_1 = {
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+ nodes[0].node.send_payment(route, payment_hash_3).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+
+ // Now claim both of the first two HTLCs on B's end, putting B in AwaitingRAA and generating an
+ // initial fulfill/CS.
+ assert!(nodes[1].node.claim_funds(payment_preimage_1));
+ check_added_monitors!(nodes[1], 1);
+ let bs_removes = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ // This claim goes in B's holding cell, allowing us to have a pending B->A RAA which does not
+ // remove the second HTLC when we send the HTLC back from B to A.
+ assert!(nodes[1].node.claim_funds(payment_preimage_2));
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
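+ // A is itself awaiting an RAA for send_1's commitment_signed, so in response to B's fulfill + CS
+ // it can only reply with an RAA (no new CS) and surface the PaymentSent event.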
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_removes.update_fulfill_htlcs[0]).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ expect_payment_sent!(nodes[0], payment_preimage_1);
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ // B is already AwaitingRAA, so it can't generate a CS here
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+ // The second HTLC is removed, but as A is in AwaitingRAA it can't generate a CS here, so the
+ // RAA that B generated above doesn't fully resolve the second HTLC from A's point of view.
+ // However, the RAA A generates here *does* fully resolve the HTLC from B's point of view (as A
+ // can no longer broadcast a commitment transaction with it and B has the preimage so can go
+ // on-chain as necessary).
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_cs.update_fulfill_htlcs[0]).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ expect_payment_sent!(nodes[0], payment_preimage_2);
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], payment_hash_3, 100000);
+
+ // Note that as this RAA was generated before the delivery of the update_fulfill it shouldn't
+ // resolve the second HTLC from A's point of view.
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ // Now that B doesn't have the second RAA anymore, but A still does, send a payment from B back
+ // to A to ensure that A doesn't count the almost-removed HTLC in update_add processing.
+ let (payment_preimage_4, payment_hash_4) = get_payment_preimage_hash!(nodes[1]);
+ let send_2 = {
+ let route = nodes[1].router.get_route(&nodes[0].node.get_our_node_id(), None, &[], 10000, TEST_FINAL_CLTV).unwrap();
+ nodes[1].node.send_payment(route, payment_hash_4).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let mut events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+
+ nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_2.msgs[0]).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_2.commitment_msg).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+
+ // Now just resolve all the outstanding messages/HTLCs for completeness...
+
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_cs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs.commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[0]);
+ expect_payment_received!(nodes[0], payment_hash_4, 10000);
+
+ claim_payment(&nodes[1], &[&nodes[0]], payment_preimage_4);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_3);
+}
+
+#[test]
+fn channel_monitor_network_test() {
+ // Simple test which builds a network of ChannelManagers, connects them to each other, and
+ // tests that ChannelMonitor is able to recover from various states.
+ let nodes = create_network(5, &[None, None, None, None, None]);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+ let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3, LocalFeatures::new(), LocalFeatures::new());
+ let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance the network a bit by relaying one payment through all the channels...
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
+
+ // Simple case with no pending HTLCs:
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
+ {
+ let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
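+ // Mine nodes[1]'s broadcast commitment tx on nodes[0]'s chain; the header fields are dummy
+ // values, as only the tx's inclusion matters here.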
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
+ test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
+ }
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ assert_eq!(nodes[1].node.list_channels().len(), 1);
+
+ // One pending HTLC is discarded by the force-close:
+ let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
+
+ // Simple case of one pending HTLC to HTLC-Timeout
+ nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
+ {
+ let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
+ test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
+ }
+ get_announce_close_broadcast_events(&nodes, 1, 2);
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+ assert_eq!(nodes[2].node.list_channels().len(), 1);
+
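+ // Claims the given preimage on $node and checks that the resulting update_fulfill is addressed to $prev_node.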
+ macro_rules! claim_funds {
+ ($node: expr, $prev_node: expr, $preimage: expr) => {
+ {
+ assert!($node.node.claim_funds($preimage));
+ check_added_monitors!($node, 1);
+
+ let events = $node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert!(update_fail_htlcs.is_empty());
+ assert_eq!(*node_id, $prev_node.node.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ };
+ }
+ }
+ }
+
+ // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
+ // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
+ nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
+ {
+ let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
+
+ // Claim the payment on nodes[3], giving it knowledge of the preimage
+ claim_funds!(nodes[3], nodes[2], payment_preimage_1);
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[3].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 1);
+
+ check_preimage_claim(&nodes[3], &node_txn);
+ }
+ get_announce_close_broadcast_events(&nodes, 2, 3);
+ assert_eq!(nodes[2].node.list_channels().len(), 0);
+ assert_eq!(nodes[3].node.list_channels().len(), 1);
+
+ { // Cheat and reset nodes[4]'s height to 1
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![] }, 1);
+ }
+
+ assert_eq!(nodes[3].node.latest_block_height.load(Ordering::Acquire), 1);
+ assert_eq!(nodes[4].node.latest_block_height.load(Ordering::Acquire), 1);
+ // One pending HTLC to time out:
+ let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0;
+ // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
+ // buffer space).
+
+ {
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[3].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
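+ // Connect blocks until the HTLC's CLTV plus the latency grace period has expired, prompting
+ // nodes[3] to broadcast an HTLC-Timeout.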
+ for i in 3..TEST_FINAL_CLTV + 2 + LATENCY_GRACE_PERIOD_BLOCKS + 1 {
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[3].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
+ }
+
+ let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
+
+ // Claim the payment on nodes[4], giving it knowledge of the preimage
+ claim_funds!(nodes[4], nodes[3], payment_preimage_2);
+
+ header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[4].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
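+ // Connect blocks until we're within CLTV_CLAIM_BUFFER of the HTLC's expiry, at which point
+ // nodes[4] (holding the preimage) broadcasts its HTLC-Success claim.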
+ for i in 3..TEST_FINAL_CLTV + 2 - CLTV_CLAIM_BUFFER + 1 {
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[4].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
+ }
+
+ test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
+
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, TEST_FINAL_CLTV - 5);
+
+ check_preimage_claim(&nodes[4], &node_txn);
+ }
+ get_announce_close_broadcast_events(&nodes, 3, 4);
+ assert_eq!(nodes[3].node.list_channels().len(), 0);
+ assert_eq!(nodes[4].node.list_channels().len(), 0);
+}
+
+#[test]
+fn test_justice_tx() {
+ // Test justice txn built on revoked HTLC-Success tx, against both sides
+
+ let mut alice_config = UserConfig::new();
+ alice_config.channel_options.announced_channel = true;
+ alice_config.peer_channel_config_limits.force_announced_channel_preference = false;
+ alice_config.own_channel_config.our_to_self_delay = 6 * 24 * 5;
+ let mut bob_config = UserConfig::new();
+ bob_config.channel_options.announced_channel = true;
+ bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
+ bob_config.own_channel_config.our_to_self_delay = 6 * 24 * 3;
+ let nodes = create_network(2, &[Some(alice_config), Some(bob_config)]);
+ // Create some new channels:
+ let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // A pending HTLC which will be revoked:
+ let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+ // Get the will-be-revoked local txn from nodes[0]
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
+ assert_eq!(revoked_local_txn[0].input.len(), 1);
+ assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
+ assert_eq!(revoked_local_txn[0].output.len(), 2); // Only the HTLC and the output back to nodes[0] are present
+ assert_eq!(revoked_local_txn[1].input.len(), 1);
+ assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
+ assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
+ // Revoke the old state
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
+
+ {
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ {
+ let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 3);
+ assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
+ assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
+
+ check_spends!(node_txn[0], revoked_local_txn[0].clone());
+ node_txn.swap_remove(0);
+ }
+ test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
+
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
+ test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone());
+ }
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+ // We test the justice tx built by A on B's revoked HTLC-Success tx
+ // Create some new channels:
+ let chan_6 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // A pending HTLC which will be revoked:
+ let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+ // Get the will-be-revoked local txn from B
+ let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn.len(), 1); // Only commitment tx
+ assert_eq!(revoked_local_txn[0].input.len(), 1);
+ assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_6.3.txid());
+ assert_eq!(revoked_local_txn[0].output.len(), 2); // Only the HTLC and the output back to A are present
+ // Revoke the old state
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_4);
+ {
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ {
+ let mut node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 3);
+ assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
+ assert_eq!(node_txn[0].input.len(), 1); // We claim the received HTLC output
+
+ check_spends!(node_txn[0], revoked_local_txn[0].clone());
+ node_txn.swap_remove(0);
+ }
+ test_txn_broadcast(&nodes[0], &chan_6, None, HTLCType::NONE);
+
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ let node_txn = test_txn_broadcast(&nodes[1], &chan_6, Some(revoked_local_txn[0].clone()), HTLCType::SUCCESS);
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
+ test_revoked_htlc_claim_txn_broadcast(&nodes[0], node_txn[1].clone());
+ }
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+}
+
+#[test]
+fn revoked_output_claim() {
+ // Simple test to ensure a node will claim a revoked output when a stale remote commitment
+ // transaction is broadcast by its counterparty
+ let nodes = create_network(2, &[None, None]);
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn.len(), 1);
+ // Only output is the full channel value back to nodes[0]:
+ assert_eq!(revoked_local_txn[0].output.len(), 1);
+ // Send a payment through, updating everyone's latest commitment txn
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
+
+ // Inform nodes[1] that nodes[0] broadcast a stale tx
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 3); // nodes[1] will broadcast justice tx twice, and its own local state once
+
+ assert_eq!(node_txn[0], node_txn[2]);
+
+ check_spends!(node_txn[0], revoked_local_txn[0].clone());
+ check_spends!(node_txn[1], chan_1.3.clone());
+
+ // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+}
+
+#[test]
+fn claim_htlc_outputs_shared_tx() {
+ // The node revoked its old state and the HTLCs haven't timed out yet; claim them in a shared justice tx
+ let nodes = create_network(2, &[None, None]);
+
+ // Create some new channel:
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance the network to generate HTLCs in both directions
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+ // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx
+ let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+ let (_payment_preimage_2, payment_hash_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
+
+ // Get the will-be-revoked local txn from node[0]
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
+ assert_eq!(revoked_local_txn[0].input.len(), 1);
+ assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
+ assert_eq!(revoked_local_txn[1].input.len(), 1);
+ assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
+ assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT); // HTLC-Timeout
+ check_spends!(revoked_local_txn[1], revoked_local_txn[0].clone());
+
+ // Revoke the old state
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
+
+ {
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
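+ // The HTLC failure is only surfaced once the revoked commitment tx is ANTI_REORG_DELAY blocks
+ // deep, guarding against reorgs.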
+ connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
+
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ assert_eq!(payment_hash, payment_hash_2);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 4);
+
+ assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
+ check_spends!(node_txn[0], revoked_local_txn[0].clone());
+
+ assert_eq!(node_txn[0], node_txn[3]); // justice tx is duplicated due to block re-scanning
+
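+ // The three claim inputs are distinguishable by their witness-script lengths: revoked to_local,
+ // offered HTLC, and received HTLC.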
+ let mut witness_lens = BTreeSet::new();
+ witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
+ witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
+ witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
+ assert_eq!(witness_lens.len(), 3);
+ assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
+ assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
+ assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
+
+ // Next nodes[1] broadcasts its current local tx state:
+ assert_eq!(node_txn[1].input.len(), 1);
+ assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); // Spending the funding tx's unique txout; tx broadcast by ChannelManager
+
+ assert_eq!(node_txn[2].input.len(), 1);
+ let witness_script = node_txn[2].clone().input[0].witness.pop().unwrap();
+ assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); // Spending an offered HTLC output
+ assert_eq!(node_txn[2].input[0].previous_output.txid, node_txn[1].txid());
+ assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
+ assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[1].previous_output.txid);
+ }
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+}
+
+#[test]
+fn claim_htlc_outputs_single_tx() {
+ // The node revoked its old state and the HTLCs have timed out; claim each of them in a separate justice tx
+ let nodes = create_network(2, &[None, None]);
+
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance the network to generate HTLCs in both directions
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+ // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both offered/received HTLC outputs on top of the commitment tx, but this
+ // time as two separate claim transactions, as we're going to time out the HTLCs given a high current height
+ let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+ let (_payment_preimage_2, payment_hash_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
+
+ // Get the will-be-revoked local txn from node[0]
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+
+ // Revoke the old state
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
+
+ {
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+ connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash());
+
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ assert_eq!(payment_hash, payment_hash_2);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 22); // ChannelManager : 2, ChannelMonitor : 5 (1 standard revoked output, 2 revocation htlc tx, 1 local commitment tx + 1 htlc timeout tx) * 2 (block-rescan) + 5 * (1 local commitment tx + 1 htlc timeout tx)
+
+ assert_eq!(node_txn[0], node_txn[7]);
+ assert_eq!(node_txn[1], node_txn[8]);
+ assert_eq!(node_txn[2], node_txn[9]);
+ assert_eq!(node_txn[3], node_txn[10]);
+ assert_eq!(node_txn[4], node_txn[11]);
+ assert_eq!(node_txn[3], node_txn[5]); // local commitment tx + htlc timeout tx broadcast by ChannelManager
+ assert_eq!(node_txn[4], node_txn[6]);
+
+ for i in 12..22 {
+ if i % 2 == 0 { assert_eq!(node_txn[3], node_txn[i]); } else { assert_eq!(node_txn[4], node_txn[i]); }
+ }
+
+ assert_eq!(node_txn[0].input.len(), 1);
+ assert_eq!(node_txn[1].input.len(), 1);
+ assert_eq!(node_txn[2].input.len(), 1);
+
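+ // Minimal previous-output lookup for Transaction::verify: resolves an outpoint only when it
+ // refers to the given transaction.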
+ fn get_txout(out_point: &BitcoinOutPoint, tx: &Transaction) -> Option<TxOut> {
+ if out_point.txid == tx.txid() {
+ tx.output.get(out_point.vout as usize).cloned()
+ } else {
+ None
+ }
+ }
+ node_txn[0].verify(|out|get_txout(out, &revoked_local_txn[0])).unwrap();
+ node_txn[1].verify(|out|get_txout(out, &revoked_local_txn[0])).unwrap();
+ node_txn[2].verify(|out|get_txout(out, &revoked_local_txn[0])).unwrap();
+
+ let mut witness_lens = BTreeSet::new();
+ witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
+ witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
+ witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
+ assert_eq!(witness_lens.len(), 3);
+ assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
+ assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
+ assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
+
+ assert_eq!(node_txn[3].input.len(), 1);
+ check_spends!(node_txn[3], chan_1.3.clone());
+
+ assert_eq!(node_txn[4].input.len(), 1);
+ let witness_script = node_txn[4].input[0].witness.last().unwrap();
+ assert_eq!(witness_script.len(), OFFERED_HTLC_SCRIPT_WEIGHT); // Spending an offered HTLC output
+ assert_eq!(node_txn[4].input[0].previous_output.txid, node_txn[3].txid());
+ assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
+ assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[1].input[0].previous_output.txid);
+ }
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+}
+
+#[test]
+fn test_htlc_on_chain_success() {
+ // Test that in case of a unilateral close onchain, we detect the state of the outputs via
+ // ChainWatchInterface and pass the preimage backwards accordingly. Here we test that the
+ // ChannelManager broadcasts the right events to the other nodes in the payment path.
+ // We test with two HTLCs simultaneously as that was not handled correctly in the past.
+ // A --------------------> B ----------------------> C (preimage)
+ // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
+ // commitment transaction was broadcast.
+ // Then, B should learn the preimage from said transactions, attempting to claim backwards
+ // towards A.
+ // B should be able to claim via preimage if A then broadcasts its local tx.
+ // Finally, when A sees B's latest local commitment transaction it should be able to claim
+ // the HTLC outputs via the preimage it learned (which, once confirmed should generate a
+ // PaymentSent event).
+
+ let nodes = create_network(3, &[None, None, None]);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance the network a bit by relaying one payment through all the channels...
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
+
+ let (our_payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
+ let (our_payment_preimage_2, _payment_hash_2) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+
+ // Broadcast legit commitment tx from C on B's chain
+ // Broadcast HTLC-Success transaction by C on the received output from C's commitment tx on B's chain
+ let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(commitment_tx.len(), 1);
+ check_spends!(commitment_tx[0], chan_2.3.clone());
+ nodes[2].node.claim_funds(our_payment_preimage);
+ nodes[2].node.claim_funds(our_payment_preimage_2);
+ check_added_monitors!(nodes[2], 2);
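+ // Only the first claim generates an immediate update_fulfill/CS; the second sits in nodes[2]'s
+ // holding cell until the first commitment dance completes.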
+ let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+
+ nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
+ check_closed_broadcast!(nodes[2]);
+ let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx), ChannelMonitor : 4 (2*2 * HTLC-Success tx)
+ assert_eq!(node_txn.len(), 5);
+ assert_eq!(node_txn[0], node_txn[3]);
+ assert_eq!(node_txn[1], node_txn[4]);
+ assert_eq!(node_txn[2], commitment_tx[0]);
+ check_spends!(node_txn[0], commitment_tx[0].clone());
+ check_spends!(node_txn[1], commitment_tx[0].clone());
+ assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert_eq!(node_txn[0].lock_time, 0);
+ assert_eq!(node_txn[1].lock_time, 0);
+
+ // Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: node_txn}, 1);
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ {
+ let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
+ assert_eq!(added_monitors.len(), 2);
+ assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
+ assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
+ added_monitors.clear();
+ }
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert!(update_fail_htlcs.is_empty());
+ assert_eq!(update_fulfill_htlcs.len(), 1);
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
+ },
+ _ => panic!("Unexpected event"),
+ };
+ macro_rules! check_tx_local_broadcast {
+ ($node: expr, $htlc_offered: expr, $commitment_tx: expr, $chan_tx: expr) => { {
+ // ChannelManager : 3 (commitment tx, 2*HTLC-Timeout tx), ChannelMonitor : 2 (timeout tx) * 2 (block-rescan)
+ let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 7);
+ assert_eq!(node_txn[0], node_txn[5]);
+ assert_eq!(node_txn[1], node_txn[6]);
+ check_spends!(node_txn[0], $commitment_tx.clone());
+ check_spends!(node_txn[1], $commitment_tx.clone());
+ assert_ne!(node_txn[0].lock_time, 0);
+ assert_ne!(node_txn[1].lock_time, 0);
+ if $htlc_offered {
+ assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ } else {
+ assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
+ assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
+ }
+ check_spends!(node_txn[2], $chan_tx.clone());
+ check_spends!(node_txn[3], node_txn[2].clone());
+ check_spends!(node_txn[4], node_txn[2].clone());
+ assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), 71);
+ assert_eq!(node_txn[3].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(node_txn[4].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert!(node_txn[3].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert!(node_txn[4].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert_ne!(node_txn[3].lock_time, 0);
+ assert_ne!(node_txn[4].lock_time, 0);
+ node_txn.clear();
+ } }
+ }
+ // nodes[1] now broadcasts its own local state as a fallback, suggesting an alternate
+ // commitment transaction with corresponding HTLC-Timeout transactions, as well as a
+ // timeout-claim of the output that nodes[2] just claimed via success.
+ check_tx_local_broadcast!(nodes[1], false, commitment_tx[0], chan_2.3);
+
+ // Broadcast legit commitment tx from A on B's chain
+ // Broadcast preimage tx by B on the offered output from A's commitment tx on A's chain
+ let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ check_spends!(commitment_tx[0], chan_1.3.clone());
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
+ check_closed_broadcast!(nodes[1]);
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx), ChannelMonitor : 1 (HTLC-Success) * 2 (block-rescan)
+ assert_eq!(node_txn.len(), 3);
+ assert_eq!(node_txn[0], node_txn[2]);
+ check_spends!(node_txn[0], commitment_tx[0].clone());
+ assert_eq!(node_txn[0].input.len(), 2);
+ assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(node_txn[0].input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(node_txn[0].lock_time, 0);
+ assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
+ check_spends!(node_txn[1], chan_1.3.clone());
+ assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), 71);
+ // We don't bother to check that B can claim the HTLC output on its commitment tx here as
+ // we already checked the same situation with A.
+
+ // Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone()] }, 1);
+ check_closed_broadcast!(nodes[0]);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ let mut first_claimed = false;
+ for event in events {
+ match event {
+ Event::PaymentSent { payment_preimage } => {
+ if payment_preimage == our_payment_preimage {
+ assert!(!first_claimed);
+ first_claimed = true;
+ } else {
+ assert_eq!(payment_preimage, our_payment_preimage_2);
+ }
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+ check_tx_local_broadcast!(nodes[0], true, commitment_tx[0], chan_1.3);
+}
+
+#[test]
+fn test_htlc_on_chain_timeout() {
+ // Test that in case of a unilateral close onchain, we detect the state of the outputs via
+ // ChainWatchInterface and time out the HTLC backwards accordingly. Here we test that the
+ // ChannelManager broadcasts the right events to the other nodes in the payment path.
+ // A ------------------> B ----------------------> C (timeout)
+ // B's commitment tx C's commitment tx
+ // \ \
+ // B's HTLC timeout tx B's timeout tx
+
+ let nodes = create_network(3, &[None, None, None]);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance the network a bit by relaying one payment through all the channels...
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
+
+ let (_payment_preimage, payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+
+ // Broadcast legit commitment tx from C on B's chain
+ let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+ check_spends!(commitment_tx[0], chan_2.3.clone());
+ nodes[2].node.fail_htlc_backwards(&payment_hash);
+ check_added_monitors!(nodes[2], 0);
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 1);
+
+ let events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert!(!update_fail_htlcs.is_empty());
+ assert!(update_fulfill_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert_eq!(nodes[1].node.get_our_node_id(), *node_id);
+ },
+ _ => panic!("Unexpected event"),
+ };
+ nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
+ check_closed_broadcast!(nodes[2]);
+ let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
+ assert_eq!(node_txn.len(), 1);
+ check_spends!(node_txn[0], chan_2.3.clone());
+ assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), 71);
+
+ // Broadcast timeout transaction by B on the received output from C's commitment tx on B's chain
+ // Verify that B's ChannelManager is able to detect that the HTLC has timed out via its own tx and react backwards accordingly
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
+ let timeout_tx;
+ {
+ let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 8); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 6 (HTLC-Timeout tx, commitment tx, timeout tx) * 2 (block-rescan)
+ assert_eq!(node_txn[0], node_txn[5]);
+ assert_eq!(node_txn[1], node_txn[6]);
+ assert_eq!(node_txn[2], node_txn[7]);
+ check_spends!(node_txn[0], commitment_tx[0].clone());
+ assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ check_spends!(node_txn[1], chan_2.3.clone());
+ check_spends!(node_txn[2], node_txn[1].clone());
+ assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), 71);
+ assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ check_spends!(node_txn[3], chan_2.3.clone());
+ check_spends!(node_txn[4], node_txn[3].clone());
+ assert_eq!(node_txn[3].input[0].witness.clone().last().unwrap().len(), 71);
+ assert_eq!(node_txn[4].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ timeout_tx = node_txn[0].clone();
+ node_txn.clear();
+ }
+
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![timeout_tx]}, 1);
+ connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
+ check_added_monitors!(nodes[1], 0);
+ check_closed_broadcast!(nodes[1]);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert!(!update_fail_htlcs.is_empty());
+ assert!(update_fulfill_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
+ },
+ _ => panic!("Unexpected event"),
+ };
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // Here we detect our own htlc_timeout_tx, so no new tx should be generated
+ assert_eq!(node_txn.len(), 0);
+
+ // Broadcast legit commitment tx from B on A's chain
+ let commitment_tx = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ check_spends!(commitment_tx[0], chan_1.3.clone());
+
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
+ check_closed_broadcast!(nodes[0]);
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 2 (timeout tx) * 2 block-rescan
+ assert_eq!(node_txn.len(), 4);
+ assert_eq!(node_txn[0], node_txn[3]);
+ check_spends!(node_txn[0], commitment_tx[0].clone());
+ assert_eq!(node_txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ check_spends!(node_txn[1], chan_1.3.clone());
+ check_spends!(node_txn[2], node_txn[1].clone());
+ assert_eq!(node_txn[1].clone().input[0].witness.last().unwrap().len(), 71);
+ assert_eq!(node_txn[2].clone().input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+}
+
+#[test]
+fn test_simple_commitment_revoked_fail_backward() {
+ // Test that in case of a revoked commitment tx, we detect the resolution of its outputs by the
+ // justice tx and fail the HTLCs backwards accordingly.
+
+ let nodes = create_network(3, &[None, None, None]);
+
+ // Create some initial channels
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
+ // Get the will-be-revoked local txn from nodes[2]
+ let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+ // Revoke the old state
+ claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
+
+ route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
+ check_added_monitors!(nodes[1], 0);
+ check_closed_broadcast!(nodes[1]);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert_eq!(update_fail_htlcs.len(), 1);
+ assert!(update_fulfill_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
+
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ },
+ _ => panic!("Unexpected event"),
+ }
+}
+
+fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
+ // Test that if our counterparty broadcasts a revoked commitment transaction we fail all
+ // pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
+ // commitment transaction anymore.
+ // To do this, we have the peer which will broadcast a revoked commitment transaction send
+ // a number of update_fail/commitment_signed updates without ever sending the RAA in
+ // response to our commitment_signed. This is somewhat misbehavior-y, though not
+ // technically disallowed and we should probably handle it reasonably.
+ // Note that this is pretty exhaustive as an outbound HTLC which we haven't yet
+ // failed/fulfilled backwards must be in at least one of the latest two remote commitment
+ // transactions:
+ // * Once we move it out of our holding cell/add it, we will immediately include it in a
+ // commitment_signed (implying it will be in the latest remote commitment transaction).
+ // * Once they remove it, we will send a (the first) commitment_signed without the HTLC,
+ // and once they revoke the previous commitment transaction (allowing us to send a new
+ // commitment_signed) we will be free to fail/fulfill the HTLC backwards.
+ let mut nodes = create_network(3, &[None, None, None]);
+
+ // Create some initial channels
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
+ // Get the will-be-revoked local txn from nodes[2]
+ let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
+ // Revoke the old state
+ claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
+
+ let value = if use_dust {
+ // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
+ // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
+ nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().our_dust_limit_satoshis * 1000
+ } else { 3000000 };
+
+ let (_, first_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+ let (_, second_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+ let (_, third_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+
+ assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash));
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 1);
+ let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fee.is_none());
+ nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+ let bs_raa = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true);
+ // Drop the last RAA from 3 -> 2
+
+ assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash));
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 1);
+ let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fee.is_none());
+ nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ // Note that nodes[1] is in AwaitingRAA, so won't send a CS
+ let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+ nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[2], 1);
+
+ assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash));
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 1);
+ let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fulfill_htlcs.is_empty());
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert_eq!(updates.update_fail_htlcs.len(), 1);
+ assert!(updates.update_fee.is_none());
+ nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+ // At this point first_payment_hash has dropped out of the latest two commitment
+ // transactions that nodes[1] is tracking...
+ nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &updates.commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ // Note that nodes[1] is (still) in AwaitingRAA, so won't send a CS
+ let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
+ nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_raa).unwrap();
+ check_added_monitors!(nodes[2], 1);
+
+ // Add a fourth HTLC, this one will get sequestered away in nodes[1]'s holding cell waiting
+ // on nodes[2]'s RAA.
+ let route = nodes[1].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (_, fourth_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[1].node.send_payment(route, fourth_payment_hash).unwrap();
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+ check_added_monitors!(nodes[1], 0);
+
+ if deliver_bs_raa {
+ nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa).unwrap();
+		// One monitor for the new revocation preimage, no second one as we won't generate a new
+		// commitment transaction for nodes[0] until process_pending_htlc_forwards().
+ check_added_monitors!(nodes[1], 1);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+ // Deliberately don't process the pending fail-back so they all fail back at once after
+ // block connection just like the !deliver_bs_raa case
+ }
+
+ let mut failed_htlcs = HashSet::new();
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
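+	// We connect ANTI_REORG_DELAY - 1 further blocks as the fail-backs are only generated once
+	// the revoked commitment transaction is buried deep enough that a shallow reorg can't undo
+	// it.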
+
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
+ match events[0] {
+ Event::PaymentFailed { ref payment_hash, .. } => {
+ assert_eq!(*payment_hash, fourth_payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ if !deliver_bs_raa {
+ match events[1] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+ }
+ nodes[1].node.process_pending_htlc_forwards();
+ check_added_monitors!(nodes[1], 1);
+
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), if deliver_bs_raa { 3 } else { 2 });
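+	// If we delivered nodes[2]'s RAA, the fourth HTLC left nodes[1]'s holding cell, so we also
+	// expect an UpdateHTLCs forwarding it on to nodes[2] (the extra event below).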
+ match events[if deliver_bs_raa { 1 } else { 0 }] {
+ MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
+ _ => panic!("Unexpected event"),
+ }
+ if deliver_bs_raa {
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
+ assert_eq!(nodes[2].node.get_our_node_id(), *node_id);
+ assert_eq!(update_add_htlcs.len(), 1);
+ assert!(update_fulfill_htlcs.is_empty());
+ assert!(update_fail_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+ match events[if deliver_bs_raa { 2 } else { 1 }] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert_eq!(update_fail_htlcs.len(), 3);
+ assert!(update_fulfill_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
+
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]).unwrap();
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]).unwrap();
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]).unwrap();
+
+ commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ // If we delivered B's RAA we got an unknown preimage error, not something
+ // that we should update our routing table for.
+ assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
+ for event in events {
+ match event {
+ MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ }
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 3);
+ match events[0] {
+ Event::PaymentFailed { ref payment_hash, .. } => {
+ assert!(failed_htlcs.insert(payment_hash.0));
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ Event::PaymentFailed { ref payment_hash, .. } => {
+ assert!(failed_htlcs.insert(payment_hash.0));
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[2] {
+ Event::PaymentFailed { ref payment_hash, .. } => {
+ assert!(failed_htlcs.insert(payment_hash.0));
+ },
+ _ => panic!("Unexpected event"),
+ }
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ assert!(failed_htlcs.contains(&first_payment_hash.0));
+ assert!(failed_htlcs.contains(&second_payment_hash.0));
+ assert!(failed_htlcs.contains(&third_payment_hash.0));
+}
+
+#[test]
+fn test_commitment_revoked_fail_backward_exhaustive_a() {
+ do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
+ do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
+ do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
+ do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
+}
+
+#[test]
+fn test_commitment_revoked_fail_backward_exhaustive_b() {
+ do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
+ do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
+ do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
+ do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
+}
+
+#[test]
+fn test_htlc_ignore_latest_remote_commitment() {
+ // Test that HTLC transactions spending the latest remote commitment transaction are simply
+ // ignored if we cannot claim them. This originally tickled an invalid unwrap().
+ let nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ route_payment(&nodes[0], &[&nodes[1]], 10000000);
+ nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
+ check_closed_broadcast!(nodes[0]);
+
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 2);
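+	// The two broadcast transactions should be our commitment transaction and the HTLC-Timeout
+	// transaction spending the offered HTLC within it.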
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
+ check_closed_broadcast!(nodes[1]);
+
+ // Duplicate the block_connected call since this may happen due to other listeners
+ // registering new transactions
+ nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
+}
+
+#[test]
+fn test_force_close_fail_back() {
+ // Check which HTLCs are failed-backwards on channel force-closure
+ let mut nodes = create_network(3, &[None, None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap();
+
+ let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+
+ let mut payment_event = {
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_2.len(), 1);
+ payment_event = SendEvent::from_event(events_2.remove(0));
+ assert_eq!(payment_event.msgs.len(), 1);
+
+ check_added_monitors!(nodes[1], 1);
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[2], 1);
+ let (_, _) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+
+ // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
+	// state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
+ // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
+
+ nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
+ check_closed_broadcast!(nodes[2]);
+ let tx = {
+ let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
+		// have a use for it unless nodes[2] learns the preimage somehow; otherwise the funds
+		// will go back to nodes[1] upon timeout.
+ assert_eq!(node_txn.len(), 1);
+ node_txn.remove(0)
+ };
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
+
+ // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
+ check_closed_broadcast!(nodes[1]);
+
+ // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
+ {
+ let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap();
+ monitors.get_mut(&OutPoint::new(Sha256dHash::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), 0)).unwrap()
+ .provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
+ }
+ nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
+ let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 1);
+ assert_eq!(node_txn[0].input.len(), 1);
+ assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
+ assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success
+ assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
+
+ check_spends!(node_txn[0], tx);
+}
+
+#[test]
+fn test_unconf_chan() {
+	// After creating a channel between the nodes, we disconnect all previously-seen blocks to force a channel close on nodes[0]'s side
+ let nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let channel_state = nodes[0].node.channel_state.lock().unwrap();
+ assert_eq!(channel_state.by_id.len(), 1);
+ assert_eq!(channel_state.short_to_id.len(), 1);
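+	// Drop the channel_state lock before calling block_disconnected below, which takes it
+	// internally.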
+ mem::drop(channel_state);
+
+ let mut headers = Vec::new();
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ headers.push(header.clone());
+ for _i in 2..100 {
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ headers.push(header.clone());
+ }
+ let mut height = 99;
+ while !headers.is_empty() {
+ nodes[0].node.block_disconnected(&headers.pop().unwrap(), height);
+ height -= 1;
+ }
+ check_closed_broadcast!(nodes[0]);
+ let channel_state = nodes[0].node.channel_state.lock().unwrap();
+ assert_eq!(channel_state.by_id.len(), 0);
+ assert_eq!(channel_state.short_to_id.len(), 0);
+}
+
+#[test]
+fn test_simple_peer_disconnect() {
+ // Test that we can reconnect when there are no lost messages
+ let nodes = create_network(3, &[None, None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
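+	// reconnect_nodes' tuple arguments (defined in the shared test utilities) describe what we
+	// expect to be re-sent on reconnect, roughly: funding_locked per side, then counts of
+	// pending HTLC adds, claims, holding-cell claims and holding-cell fails, and finally
+	// whether each side still owes an RAA.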
+ reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
+ let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
+ fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
+ let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
+ let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
+ let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3);
+ fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
+
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
+ {
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ Event::PaymentSent { payment_preimage } => {
+ assert_eq!(payment_preimage, payment_preimage_3);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
+ assert_eq!(payment_hash, payment_hash_5);
+ assert!(rejected_by_dest);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
+ fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
+}
+
+fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
+ // Test that we can reconnect when in-flight HTLC updates get dropped
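+	// messages_delivered selects how far through the initial update_add/commitment_signed/RAA
+	// exchange we get before disconnecting, letting us drop the in-flight state at every
+	// possible point.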
+ let mut nodes = create_network(2, &[None, None]);
+ if messages_delivered == 0 {
+ create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001, LocalFeatures::new(), LocalFeatures::new());
+ // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect)
+ } else {
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ }
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+
+ let payment_event = {
+ nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+ assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
+
+ if messages_delivered < 2 {
+ // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
+ } else {
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ if messages_delivered >= 3 {
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ if messages_delivered >= 4 {
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[0], 1);
+
+ if messages_delivered >= 5 {
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed).unwrap();
+ let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[0], 1);
+
+ if messages_delivered >= 6 {
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[1], 1);
+ }
+ }
+ }
+ }
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ if messages_delivered < 3 {
+ // Even if the funding_locked messages get exchanged, as long as nothing further was
+ // received on either side, both sides will need to resend them.
+ reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
+ } else if messages_delivered == 3 {
+ // nodes[0] still wants its RAA + commitment_signed
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
+ } else if messages_delivered == 4 {
+ // nodes[0] still wants its commitment_signed
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ } else if messages_delivered == 5 {
+ // nodes[1] still wants its final RAA
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+ } else if messages_delivered == 6 {
+ // Everything was delivered...
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
+
+ let events_1 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_1.len(), 1);
+ match events_1[0] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ nodes[1].node.process_pending_htlc_forwards();
+
+ let events_2 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_2.len(), 1);
+ match events_2[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(payment_hash_1, *payment_hash);
+ assert_eq!(amt, 1000000);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ nodes[1].node.claim_funds(payment_preimage_1);
+ check_added_monitors!(nodes[1], 1);
+
+ let events_3 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_3.len(), 1);
+ let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ if messages_delivered >= 1 {
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc).unwrap();
+
+ let events_4 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_4.len(), 1);
+ match events_4[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(payment_preimage_1, *payment_preimage);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ if messages_delivered >= 2 {
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ if messages_delivered >= 3 {
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[1], 1);
+
+ if messages_delivered >= 4 {
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed).unwrap();
+ let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[1], 1);
+
+ if messages_delivered >= 5 {
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[0], 1);
+ }
+ }
+ }
+ }
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ if messages_delivered < 2 {
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+ //TODO: Deduplicate PaymentSent events, then enable this if:
+ //if messages_delivered < 1 {
+ let events_4 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_4.len(), 1);
+ match events_4[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(payment_preimage_1, *payment_preimage);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ //}
+ } else if messages_delivered == 2 {
+ // nodes[0] still wants its RAA + commitment_signed
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
+ } else if messages_delivered == 3 {
+ // nodes[0] still wants its commitment_signed
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
+ } else if messages_delivered == 4 {
+ // nodes[1] still wants its final RAA
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+ } else if messages_delivered == 5 {
+ // Everything was delivered...
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ // Channel should still work fine...
+ let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
+
+#[test]
+fn test_drop_messages_peer_disconnect_a() {
+ do_test_drop_messages_peer_disconnect(0);
+ do_test_drop_messages_peer_disconnect(1);
+ do_test_drop_messages_peer_disconnect(2);
+ do_test_drop_messages_peer_disconnect(3);
+}
+
+#[test]
+fn test_drop_messages_peer_disconnect_b() {
+ do_test_drop_messages_peer_disconnect(4);
+ do_test_drop_messages_peer_disconnect(5);
+ do_test_drop_messages_peer_disconnect(6);
+}
+
+#[test]
+fn test_funding_peer_disconnect() {
+ // Test that we can lock in our funding tx while disconnected
+ let nodes = create_network(2, &[None, None]);
+ let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, LocalFeatures::new(), LocalFeatures::new());
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ confirm_transaction(&nodes[0].chain_monitor, &tx, tx.version);
+ let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_1.len(), 1);
+ match events_1[0] {
+ MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ confirm_transaction(&nodes[1].chain_monitor, &tx, tx.version);
+ let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_2.len(), 2);
+ match events_2[0] {
+ MessageSendEvent::SendFundingLocked { ref node_id, msg: _ } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events_2[1] {
+ MessageSendEvent::SendAnnouncementSignatures { ref node_id, msg: _ } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+	// TODO: We shouldn't need to manually pass list_usable_channels here once we support
+	// rebroadcasting announcement_signatures upon reconnect.
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage, _) = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+}
+
+#[test]
+fn test_drop_messages_peer_disconnect_dual_htlc() {
+ // Test that we can handle reconnecting when both sides of a channel have pending
+ // commitment_updates when we disconnect.
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let (payment_preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ // Now try to send a second payment which will fail to send
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_2, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+
+ nodes[0].node.send_payment(route.clone(), payment_hash_2).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let events_1 = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_1.len(), 1);
+ match events_1[0] {
+ MessageSendEvent::UpdateHTLCs { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+
+ assert!(nodes[1].node.claim_funds(payment_preimage_1));
+ check_added_monitors!(nodes[1], 1);
+
+ let events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_2.len(), 1);
+ match events_2[0] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ assert!(update_add_htlcs.is_empty());
+ assert_eq!(update_fulfill_htlcs.len(), 1);
+ assert!(update_fail_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert!(update_fee.is_none());
+
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]).unwrap();
+ let events_3 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_3.len(), 1);
+ match events_3[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(*payment_preimage, payment_preimage_1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap();
+ let _ = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[0], 1);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+ assert_eq!(reestablish_1.len(), 1);
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+ let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+ assert_eq!(reestablish_2.len(), 1);
+
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+ let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+ let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
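+	// handle_chan_reestablish_msgs! returns a (funding_locked, revoke_and_ack,
+	// commitment_update, RAA-vs-commitment-signed order) tuple, with None for any message not
+	// re-sent.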
+
+ assert!(as_resp.0.is_none());
+ assert!(bs_resp.0.is_none());
+
+ assert!(bs_resp.1.is_none());
+ assert!(bs_resp.2.is_none());
+
+ assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst);
+
+ assert_eq!(as_resp.2.as_ref().unwrap().update_add_htlcs.len(), 1);
+ assert!(as_resp.2.as_ref().unwrap().update_fulfill_htlcs.is_empty());
+ assert!(as_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty());
+ assert!(as_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty());
+ assert!(as_resp.2.as_ref().unwrap().update_fee.is_none());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().update_add_htlcs[0]).unwrap();
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_resp.2.as_ref().unwrap().commitment_signed).unwrap();
+ let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), as_resp.1.as_ref().unwrap()).unwrap();
+ let bs_second_commitment_signed = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(bs_second_commitment_signed.update_add_htlcs.is_empty());
+ assert!(bs_second_commitment_signed.update_fulfill_htlcs.is_empty());
+ assert!(bs_second_commitment_signed.update_fail_htlcs.is_empty());
+ assert!(bs_second_commitment_signed.update_fail_malformed_htlcs.is_empty());
+ assert!(bs_second_commitment_signed.update_fee.is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+ let as_commitment_signed = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ assert!(as_commitment_signed.update_add_htlcs.is_empty());
+ assert!(as_commitment_signed.update_fulfill_htlcs.is_empty());
+ assert!(as_commitment_signed.update_fail_htlcs.is_empty());
+ assert!(as_commitment_signed.update_fail_malformed_htlcs.is_empty());
+ assert!(as_commitment_signed.update_fee.is_none());
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_commitment_signed.commitment_signed).unwrap();
+ let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.commitment_signed).unwrap();
+ let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+ // No commitment_signed so get_event_msg's assert(len == 1) passes
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let events_5 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_5.len(), 1);
+ match events_5[0] {
+ Event::PaymentReceived { ref payment_hash, amt: _ } => {
+ assert_eq!(payment_hash_2, *payment_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ check_added_monitors!(nodes[0], 1);
+
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+}
+
+#[test]
+fn test_invalid_channel_announcement() {
+	//Test BOLT 7 channel_announcement msg requirements for the final node; gather data to build custom channel_announcement msgs
+ let secp_ctx = Secp256k1::new();
+ let nodes = create_network(2, &[None, None]);
+
+ let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1], LocalFeatures::new(), LocalFeatures::new());
+
+ let a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
+ let b_channel_lock = nodes[1].node.channel_state.lock().unwrap();
+ let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap();
+ let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap();
+
+ let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
+
+ let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key);
+ let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key);
+
+ let as_network_key = nodes[0].node.get_our_node_id();
+ let bs_network_key = nodes[1].node.get_our_node_id();
+
+ let were_node_one = as_bitcoin_key.serialize()[..] < bs_bitcoin_key.serialize()[..];
+
+ let mut chan_announcement;
+
+ macro_rules! dummy_unsigned_msg {
+ () => {
+ msgs::UnsignedChannelAnnouncement {
+ features: msgs::GlobalFeatures::new(),
+ chain_hash: genesis_block(Network::Testnet).header.bitcoin_hash(),
+ short_channel_id: as_chan.get_short_channel_id().unwrap(),
+ node_id_1: if were_node_one { as_network_key } else { bs_network_key },
+ node_id_2: if were_node_one { bs_network_key } else { as_network_key },
+ bitcoin_key_1: if were_node_one { as_bitcoin_key } else { bs_bitcoin_key },
+ bitcoin_key_2: if were_node_one { bs_bitcoin_key } else { as_bitcoin_key },
+ excess_data: Vec::new(),
+ };
+ }
+ }
+
+ macro_rules! sign_msg {
+ ($unsigned_msg: expr) => {
+ let msghash = Message::from_slice(&Sha256dHash::hash(&$unsigned_msg.encode()[..])[..]).unwrap();
+ let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_local_keys().funding_key);
+ let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_local_keys().funding_key);
+ let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].keys_manager.get_node_secret());
+ let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].keys_manager.get_node_secret());
+ chan_announcement = msgs::ChannelAnnouncement {
+ node_signature_1 : if were_node_one { as_node_sig } else { bs_node_sig},
+ node_signature_2 : if were_node_one { bs_node_sig } else { as_node_sig},
+ bitcoin_signature_1: if were_node_one { as_bitcoin_sig } else { bs_bitcoin_sig },
+ bitcoin_signature_2 : if were_node_one { bs_bitcoin_sig } else { as_bitcoin_sig },
+ contents: $unsigned_msg
+ }
+ }
+ }
+
+ let unsigned_msg = dummy_unsigned_msg!();
+ sign_msg!(unsigned_msg);
+ assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true);
+ let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
+
+ // Configured with Network::Testnet
+ let mut unsigned_msg = dummy_unsigned_msg!();
+ unsigned_msg.chain_hash = genesis_block(Network::Bitcoin).header.bitcoin_hash();
+ sign_msg!(unsigned_msg);
+ assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
+
+ let mut unsigned_msg = dummy_unsigned_msg!();
+ unsigned_msg.chain_hash = Sha256dHash::hash(&[1,2,3,4,5,6,7,8,9]);
+ sign_msg!(unsigned_msg);
+ assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
+}
+
+#[test]
+fn test_no_txn_manager_serialize_deserialize() {
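+	// Test that a ChannelManager can be serialized and reloaded before its funding transaction
+	// has confirmed, then confirm the funding and route a payment after reconnecting.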
+ let mut nodes = create_network(2, &[None, None]);
+
+ let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, LocalFeatures::new(), LocalFeatures::new());
+
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ let nodes_0_serialized = nodes[0].node.encode();
+ let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+ nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
+
+ nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 })));
+ let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+ let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
+ assert!(chan_0_monitor_read.is_empty());
+
+ let mut nodes_0_read = &nodes_0_serialized[..];
+ let config = UserConfig::new();
+ let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
+ let (_, nodes_0_deserialized) = {
+ let mut channel_monitors = HashMap::new();
+ channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor);
+ <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ default_config: config,
+ keys_manager,
+ fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
+ monitor: nodes[0].chan_monitor.clone(),
+ chain_monitor: nodes[0].chain_monitor.clone(),
+ tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+ logger: Arc::new(test_utils::TestLogger::new()),
+ channel_monitors: &channel_monitors,
+ }).unwrap()
+ };
+ assert!(nodes_0_read.is_empty());
+
+ assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
+ nodes[0].node = Arc::new(nodes_0_deserialized);
+ let nodes_0_as_listener: Arc<ChainListener> = nodes[0].node.clone();
+ nodes[0].chain_monitor.register_listener(Arc::downgrade(&nodes_0_as_listener));
+ assert_eq!(nodes[0].node.list_channels().len(), 1);
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+ let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+
+ let (funding_locked, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
+ let (announcement, as_update, bs_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &funding_locked);
+ for node in nodes.iter() {
+ assert!(node.router.handle_channel_announcement(&announcement).unwrap());
+ node.router.handle_channel_update(&as_update).unwrap();
+ node.router.handle_channel_update(&bs_update).unwrap();
+ }
+
+ send_payment(&nodes[0], &[&nodes[1]], 1000000);
+}
+
+#[test]
+fn test_simple_manager_serialize_deserialize() {
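+	// Simple ChannelManager serialization round-trip with two payments in flight: after
+	// reloading we should still be able to fail one back and claim the other.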
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+ let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ let nodes_0_serialized = nodes[0].node.encode();
+ let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+ nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut chan_0_monitor_serialized).unwrap();
+
+ nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 })));
+ let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
+ let (_, chan_0_monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut chan_0_monitor_read, Arc::new(test_utils::TestLogger::new())).unwrap();
+ assert!(chan_0_monitor_read.is_empty());
+
+ let mut nodes_0_read = &nodes_0_serialized[..];
+ let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
+ let (_, nodes_0_deserialized) = {
+ let mut channel_monitors = HashMap::new();
+ channel_monitors.insert(chan_0_monitor.get_funding_txo().unwrap(), &chan_0_monitor);
+ <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ default_config: UserConfig::new(),
+ keys_manager,
+ fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
+ monitor: nodes[0].chan_monitor.clone(),
+ chain_monitor: nodes[0].chain_monitor.clone(),
+ tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+ logger: Arc::new(test_utils::TestLogger::new()),
+ channel_monitors: &channel_monitors,
+ }).unwrap()
+ };
+ assert!(nodes_0_read.is_empty());
+
+ assert!(nodes[0].chan_monitor.add_update_monitor(chan_0_monitor.get_funding_txo().unwrap(), chan_0_monitor).is_ok());
+ nodes[0].node = Arc::new(nodes_0_deserialized);
+ check_added_monitors!(nodes[0], 1);
+
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
+ claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
+}
+
+#[test]
+fn test_manager_serialize_deserialize_inconsistent_monitor() {
+ // Test deserializing a ChannelManager with an out-of-date ChannelMonitor
+ let mut nodes = create_network(4, &[None, None, None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 2, 0, LocalFeatures::new(), LocalFeatures::new());
+ let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, LocalFeatures::new(), LocalFeatures::new());
+
+ let (our_payment_preimage, _) = route_payment(&nodes[2], &[&nodes[0], &nodes[1]], 1000000);
+
+	// Serialize the ChannelManager here, but keep the monitors up-to-date
+ let nodes_0_serialized = nodes[0].node.encode();
+
+ route_payment(&nodes[0], &[&nodes[3]], 1000000);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[3].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+	// Now serialize the ChannelMonitors (the one for the channel w/ nodes[3] is now
+	// out-of-sync with the serialized ChannelManager)
+ let mut node_0_monitors_serialized = Vec::new();
+ for monitor in nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter() {
+ let mut writer = test_utils::TestVecWriter(Vec::new());
+ monitor.1.write_for_disk(&mut writer).unwrap();
+ node_0_monitors_serialized.push(writer.0);
+ }
+
+ nodes[0].chan_monitor = Arc::new(test_utils::TestChannelMonitor::new(nodes[0].chain_monitor.clone(), nodes[0].tx_broadcaster.clone(), Arc::new(test_utils::TestLogger::new()), Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 })));
+ let mut node_0_monitors = Vec::new();
+ for serialized in node_0_monitors_serialized.iter() {
+ let mut read = &serialized[..];
+ let (_, monitor) = <(Sha256dHash, ChannelMonitor)>::read(&mut read, Arc::new(test_utils::TestLogger::new())).unwrap();
+ assert!(read.is_empty());
+ node_0_monitors.push(monitor);
+ }
+
+ let mut nodes_0_read = &nodes_0_serialized[..];
+ let keys_manager = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new())));
+ let (_, nodes_0_deserialized) = <(Sha256dHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ default_config: UserConfig::new(),
+ keys_manager,
+ fee_estimator: Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 }),
+ monitor: nodes[0].chan_monitor.clone(),
+ chain_monitor: nodes[0].chain_monitor.clone(),
+ tx_broadcaster: nodes[0].tx_broadcaster.clone(),
+ logger: Arc::new(test_utils::TestLogger::new()),
+ channel_monitors: &node_0_monitors.iter().map(|monitor| { (monitor.get_funding_txo().unwrap(), monitor) }).collect(),
+ }).unwrap();
+ assert!(nodes_0_read.is_empty());
+
+ { // Channel close should result in a commitment tx and an HTLC tx
+ let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(txn.len(), 2);
+ assert_eq!(txn[0].input[0].previous_output.txid, funding_tx.txid());
+ assert_eq!(txn[1].input[0].previous_output.txid, txn[0].txid());
+ }
+
+ for monitor in node_0_monitors.drain(..) {
+ assert!(nodes[0].chan_monitor.add_update_monitor(monitor.get_funding_txo().unwrap(), monitor).is_ok());
+ check_added_monitors!(nodes[0], 1);
+ }
+ nodes[0].node = Arc::new(nodes_0_deserialized);
+
+ // nodes[1] and nodes[2] have no lost state with nodes[0]...
+ reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ //... and we can even still claim the payment!
+ claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
+
+ nodes[3].node.peer_connected(&nodes[0].node.get_our_node_id());
+ let reestablish = get_event_msg!(nodes[3], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
+ nodes[0].node.peer_connected(&nodes[3].node.get_our_node_id());
+	if let Err(msgs::LightningError { action: msgs::ErrorAction::SendErrorMessage { msg }, .. }) = nodes[0].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish) {
+ assert_eq!(msg.channel_id, channel_id);
+ } else { panic!("Unexpected result"); }
+}
+
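+// check_spendable_outputs! sweeps each SpendableOutputDescriptor the monitor surfaces into a
+// dummy OP_RETURN output, exercising witness construction for all three descriptor types:
+// P2WPKH to_remote outputs, CSV-delayed P2WSH to_local outputs, and static outputs whose key
+// is re-derived from the node seed.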
+macro_rules! check_spendable_outputs {
+ ($node: expr, $der_idx: expr) => {
+ {
+ let events = $node.chan_monitor.simple_monitor.get_and_clear_pending_events();
+ let mut txn = Vec::new();
+ for event in events {
+ match event {
+ Event::SpendableOutputs { ref outputs } => {
+ for outp in outputs {
+ match *outp {
+ SpendableOutputDescriptor::DynamicOutputP2WPKH { ref outpoint, ref key, ref output } => {
+ let input = TxIn {
+ previous_output: outpoint.clone(),
+ script_sig: Script::new(),
+ sequence: 0,
+ witness: Vec::new(),
+ };
+ let outp = TxOut {
+ script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(),
+ value: output.value,
+ };
+ let mut spend_tx = Transaction {
+ version: 2,
+ lock_time: 0,
+ input: vec![input],
+ output: vec![outp],
+ };
+ let secp_ctx = Secp256k1::new();
+ let remotepubkey = PublicKey::from_secret_key(&secp_ctx, &key);
+ let witness_script = Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: remotepubkey}, Network::Testnet).script_pubkey();
+ let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap();
+ let remotesig = secp_ctx.sign(&sighash, key);
+ spend_tx.input[0].witness.push(remotesig.serialize_der().to_vec());
+ spend_tx.input[0].witness[0].push(SigHashType::All as u8);
+ spend_tx.input[0].witness.push(remotepubkey.serialize().to_vec());
+ txn.push(spend_tx);
+ },
+ SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref key, ref witness_script, ref to_self_delay, ref output } => {
+ let input = TxIn {
+ previous_output: outpoint.clone(),
+ script_sig: Script::new(),
+ sequence: *to_self_delay as u32,
+ witness: Vec::new(),
+ };
+ let outp = TxOut {
+ script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(),
+ value: output.value,
+ };
+ let mut spend_tx = Transaction {
+ version: 2,
+ lock_time: 0,
+ input: vec![input],
+ output: vec![outp],
+ };
+ let secp_ctx = Secp256k1::new();
+ let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], witness_script, output.value)[..]).unwrap();
+ let local_delaysig = secp_ctx.sign(&sighash, key);
+ spend_tx.input[0].witness.push(local_delaysig.serialize_der().to_vec());
+ spend_tx.input[0].witness[0].push(SigHashType::All as u8);
+ spend_tx.input[0].witness.push(vec!(0));
+ spend_tx.input[0].witness.push(witness_script.clone().into_bytes());
+ txn.push(spend_tx);
+ },
+ SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => {
+ let secp_ctx = Secp256k1::new();
+ let input = TxIn {
+ previous_output: outpoint.clone(),
+ script_sig: Script::new(),
+ sequence: 0,
+ witness: Vec::new(),
+ };
+ let outp = TxOut {
+ script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(),
+ value: output.value,
+ };
+ let mut spend_tx = Transaction {
+ version: 2,
+ lock_time: 0,
+ input: vec![input],
+ output: vec![outp.clone()],
+ };
+ let secret = {
+ match ExtendedPrivKey::new_master(Network::Testnet, &$node.node_seed) {
+ Ok(master_key) => {
+ match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx($der_idx).expect("key space exhausted")) {
+ Ok(key) => key,
+ Err(_) => panic!("Your RNG is busted"),
+ }
+ }
+ Err(_) => panic!("Your rng is busted"),
+ }
+ };
+ let pubkey = ExtendedPubKey::from_private(&secp_ctx, &secret).public_key;
+ let witness_script = Address::p2pkh(&pubkey, Network::Testnet).script_pubkey();
+ let sighash = Message::from_slice(&bip143::SighashComponents::new(&spend_tx).sighash_all(&spend_tx.input[0], &witness_script, output.value)[..]).unwrap();
+ let sig = secp_ctx.sign(&sighash, &secret.private_key.key);
+ spend_tx.input[0].witness.push(sig.serialize_der().to_vec());
+ spend_tx.input[0].witness[0].push(SigHashType::All as u8);
+ spend_tx.input[0].witness.push(pubkey.key.serialize().to_vec());
+ txn.push(spend_tx);
+ },
+ }
+ }
+ },
+ _ => panic!("Unexpected event"),
+ };
+ }
+ txn
+ }
+ }
+}
+
+#[test]
+fn test_claim_sizeable_push_msat() {
+ // Incidentally test SpendableOutput event generation due to detection of to_local output on commitment tx
+ let nodes = create_network(2, &[None, None]);
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, LocalFeatures::new(), LocalFeatures::new());
+ nodes[1].node.force_close_channel(&chan.2);
+ check_closed_broadcast!(nodes[1]);
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 1);
+ check_spends!(node_txn[0], chan.3.clone());
+	assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output as channel_reserve_satoshis blocks us from doing so at channel opening
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 0);
+ let spend_txn = check_spendable_outputs!(nodes[1], 1);
+ assert_eq!(spend_txn.len(), 1);
+ check_spends!(spend_txn[0], node_txn[0].clone());
+}
+
+#[test]
+fn test_claim_on_remote_sizeable_push_msat() {
+	// Same test as the previous one, but on the remote commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and the
+	// to_remote output is encumbered by a P2WPKH
+
+ let nodes = create_network(2, &[None, None]);
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, LocalFeatures::new(), LocalFeatures::new());
+ nodes[0].node.force_close_channel(&chan.2);
+ check_closed_broadcast!(nodes[0]);
+
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 1);
+ check_spends!(node_txn[0], chan.3.clone());
+	assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output as channel_reserve_satoshis blocks us from doing so at channel opening
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 0);
+ check_closed_broadcast!(nodes[1]);
+ let spend_txn = check_spendable_outputs!(nodes[1], 1);
+ assert_eq!(spend_txn.len(), 2);
+ assert_eq!(spend_txn[0], spend_txn[1]);
+ check_spends!(spend_txn[0], node_txn[0].clone());
+}
+
+#[test]
+fn test_claim_on_remote_revoked_sizeable_push_msat() {
+	// Same test as the previous one, but on the remote revoked commitment tx, as per_commitment_point registration changes depending on whether you're the funder or fundee, and the
+	// to_remote output is encumbered by a P2WPKH
+
+ let nodes = create_network(2, &[None, None]);
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 59000000, LocalFeatures::new(), LocalFeatures::new());
+ let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn[0].input.len(), 1);
+ assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
+
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ check_closed_broadcast!(nodes[1]);
+
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let spend_txn = check_spendable_outputs!(nodes[1], 1);
+ assert_eq!(spend_txn.len(), 4);
+ assert_eq!(spend_txn[0], spend_txn[2]); // to_remote output on revoked remote commitment_tx
+ check_spends!(spend_txn[0], revoked_local_txn[0].clone());
+ assert_eq!(spend_txn[1], spend_txn[3]); // to_local output on local commitment tx
+ check_spends!(spend_txn[1], node_txn[0].clone());
+}
+
+#[test]
+fn test_static_spendable_outputs_preimage_tx() {
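+	// Test that a preimage tx claiming an HTLC on the remote party's commitment transaction
+	// produces a spendable output event for B.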
+ let nodes = create_network(2, &[None, None]);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+
+ let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(commitment_tx[0].input.len(), 1);
+ assert_eq!(commitment_tx[0].input[0].previous_output.txid, chan_1.3.txid());
+
+ // Settle A's commitment tx on B's chain
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ assert!(nodes[1].node.claim_funds(payment_preimage));
+ check_added_monitors!(nodes[1], 1);
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()] }, 1);
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexepected event"),
+ }
+
+	// Check that B's monitor was able to send back an output descriptor event for the preimage tx on A's commitment tx
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 1 (local commitment tx), ChannelMonitor: 2 (1 preimage tx) * 2 (block-rescan)
+ check_spends!(node_txn[0], commitment_tx[0].clone());
+ assert_eq!(node_txn[0], node_txn[2]);
+ assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ check_spends!(node_txn[1], chan_1.3.clone());
+
+ let spend_txn = check_spendable_outputs!(nodes[1], 1);
+ assert_eq!(spend_txn.len(), 2);
+ assert_eq!(spend_txn[0], spend_txn[1]);
+ check_spends!(spend_txn[0], node_txn[0].clone());
+}
+
+#[test]
+fn test_static_spendable_outputs_justice_tx_revoked_commitment_tx() {
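+ // A broadcasts a revoked commitment tx; B should sweep it with a single justice tx and then
+ // receive a spendable output descriptor for the justice tx's output.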
+ let nodes = create_network(2, &[None, None]);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn[0].input.len(), 1);
+ assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
+
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ check_closed_broadcast!(nodes[1]);
+
+ let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 3);
+ assert_eq!(node_txn.pop().unwrap(), node_txn[0]);
+ assert_eq!(node_txn[0].input.len(), 2);
+ check_spends!(node_txn[0], revoked_local_txn[0].clone());
+
+ let spend_txn = check_spendable_outputs!(nodes[1], 1);
+ assert_eq!(spend_txn.len(), 2);
+ assert_eq!(spend_txn[0], spend_txn[1]);
+ check_spends!(spend_txn[0], node_txn[0].clone());
+}
+
+#[test]
+fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
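+ // A broadcasts a revoked commitment tx and then an HTLC-Timeout tx spending it. B should
+ // generate justice txs against both and receive spendable output descriptors for their outputs.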
+ let nodes = create_network(2, &[None, None]);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn[0].input.len(), 1);
+ assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
+
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ // A will generate HTLC-Timeout from revoked commitment tx
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ check_closed_broadcast!(nodes[0]);
+
+ let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(revoked_htlc_txn.len(), 3);
+ assert_eq!(revoked_htlc_txn[0], revoked_htlc_txn[2]);
+ assert_eq!(revoked_htlc_txn[0].input.len(), 1);
+ assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone());
+ check_spends!(revoked_htlc_txn[1], chan_1.3.clone());
+
+ // B will generate justice tx from A's revoked commitment/HTLC tx
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
+ check_closed_broadcast!(nodes[1]);
+
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 4);
+ assert_eq!(node_txn[3].input.len(), 1);
+ check_spends!(node_txn[3], revoked_htlc_txn[0].clone());
+
+ // Check B's ChannelMonitor was able to generate the right spendable output descriptor
+ let spend_txn = check_spendable_outputs!(nodes[1], 1);
+ assert_eq!(spend_txn.len(), 3);
+ assert_eq!(spend_txn[0], spend_txn[1]);
+ check_spends!(spend_txn[0], node_txn[0].clone());
+ check_spends!(spend_txn[2], node_txn[3].clone());
+}
+
+#[test]
+fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
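+ // B broadcasts a revoked commitment tx and then an HTLC-Success tx spending it. A should
+ // generate justice txs against both and receive spendable output descriptors for their outputs.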
+ let nodes = create_network(2, &[None, None]);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+ let revoked_local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn[0].input.len(), 1);
+ assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
+
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ // B will generate HTLC-Success from revoked commitment tx
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ check_closed_broadcast!(nodes[1]);
+ let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+
+ assert_eq!(revoked_htlc_txn.len(), 3);
+ assert_eq!(revoked_htlc_txn[0], revoked_htlc_txn[2]);
+ assert_eq!(revoked_htlc_txn[0].input.len(), 1);
+ assert_eq!(revoked_htlc_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ check_spends!(revoked_htlc_txn[0], revoked_local_txn[0].clone());
+
+ // A will generate justice tx from B's revoked commitment/HTLC tx
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
+ check_closed_broadcast!(nodes[0]);
+
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 4);
+ assert_eq!(node_txn[3].input.len(), 1);
+ check_spends!(node_txn[3], revoked_htlc_txn[0].clone());
+
+ // Check A's ChannelMonitor was able to generate the right spendable output descriptor
+ let spend_txn = check_spendable_outputs!(nodes[0], 1);
+ assert_eq!(spend_txn.len(), 5);
+ assert_eq!(spend_txn[0], spend_txn[2]);
+ assert_eq!(spend_txn[1], spend_txn[3]);
+ check_spends!(spend_txn[0], revoked_local_txn[0].clone()); // spending to_remote output from revoked local tx
+ check_spends!(spend_txn[1], node_txn[2].clone()); // spending justice tx output from revoked local tx htlc received output
+ check_spends!(spend_txn[4], node_txn[3].clone()); // spending justice tx output on htlc success tx
+}
+
+#[test]
+fn test_onchain_to_onchain_claim() {
+ // Test that, in case of channel closure, we detect the state of the output via
+ // ChainWatchInterface and claim the HTLC on the downstream peer's remote commitment tx.
+ // First, have C claim an HTLC against its own latest commitment transaction.
+ // Then, broadcast these to B, which should update the monitor downstream on the A<->B
+ // channel.
+ // Finally, check that B will claim the HTLC output if A's latest commitment transaction
+ // gets broadcast.
+
+ let nodes = create_network(3, &[None, None, None]);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance the network a bit by relaying one payment through all the channels ...
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
+
+ let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+ let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+ check_spends!(commitment_tx[0], chan_2.3.clone());
+ nodes[2].node.claim_funds(payment_preimage);
+ check_added_monitors!(nodes[2], 1);
+ let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+
+ nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
+ check_closed_broadcast!(nodes[2]);
+
+ let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
+ assert_eq!(c_txn.len(), 3);
+ assert_eq!(c_txn[0], c_txn[2]);
+ assert_eq!(commitment_tx[0], c_txn[1]);
+ check_spends!(c_txn[1], chan_2.3.clone());
+ check_spends!(c_txn[2], c_txn[1].clone());
+ assert_eq!(c_txn[1].input[0].witness.clone().last().unwrap().len(), 71);
+ assert_eq!(c_txn[2].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ assert!(c_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert_eq!(c_txn[0].lock_time, 0); // Success tx
+
+ // Having broadcast C's commitment tx and HTLC-Success tx on B's chain, B should successfully extract the preimage and update the downstream monitor
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]}, 1);
+ {
+ let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(b_txn.len(), 4);
+ assert_eq!(b_txn[0], b_txn[3]);
+ check_spends!(b_txn[1], chan_2.3); // B local commitment tx, issued by ChannelManager
+ check_spends!(b_txn[2], b_txn[1].clone()); // HTLC-Timeout on B local commitment tx, issued by ChannelManager
+ assert_eq!(b_txn[2].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert!(b_txn[2].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert_ne!(b_txn[2].lock_time, 0); // Timeout tx
+ check_spends!(b_txn[0], c_txn[1].clone()); // timeout tx on C remote commitment tx, issued by ChannelMonitor, * 2 due to block rescan
+ assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
+ b_txn.clear();
+ }
+ let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+ check_added_monitors!(nodes[1], 1);
+ match msg_events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ match msg_events[1] {
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, .. } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert!(update_fail_htlcs.is_empty());
+ assert_eq!(update_fulfill_htlcs.len(), 1);
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
+ },
+ _ => panic!("Unexpected event"),
+ };
+ // Broadcast A's commitment tx on B's chain to see if we are able to claim inbound HTLC with our HTLC-Success tx
+ let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
+ let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(b_txn.len(), 3);
+ check_spends!(b_txn[1], chan_1.3); // Local commitment tx, issued by ChannelManager
+ assert_eq!(b_txn[0], b_txn[2]); // HTLC-Success tx, issued by ChannelMonitor, * 2 due to block rescan
+ check_spends!(b_txn[0], commitment_tx[0].clone());
+ assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
+ assert_eq!(b_txn[2].lock_time, 0); // Success tx
+
+ check_closed_broadcast!(nodes[1]);
+}
+
+#[test]
+fn test_duplicate_payment_hash_one_failure_one_success() {
+ // Topology: A --> B --> C
+ // We route two payments with the same hash between B and C; one will time out, the other will be successfully claimed
+ let mut nodes = create_network(3, &[None, None, None]);
+
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+
+ let (our_payment_preimage, duplicate_payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000);
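+ // Roll back the (deterministic) payment counter so the next route_payment call re-uses the
+ // same payment hash, as asserted below.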
+ *nodes[0].network_payment_count.borrow_mut() -= 1;
+ assert_eq!(route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 900000).1, duplicate_payment_hash);
+
+ let commitment_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(commitment_txn[0].input.len(), 1);
+ check_spends!(commitment_txn[0], chan_2.3.clone());
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
+ check_closed_broadcast!(nodes[1]);
+
+ let htlc_timeout_tx;
+ { // Extract one of the two HTLC-Timeout transactions
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 7);
+ assert_eq!(node_txn[0], node_txn[5]);
+ assert_eq!(node_txn[1], node_txn[6]);
+ check_spends!(node_txn[0], commitment_txn[0].clone());
+ assert_eq!(node_txn[0].input.len(), 1);
+ check_spends!(node_txn[1], commitment_txn[0].clone());
+ assert_eq!(node_txn[1].input.len(), 1);
+ assert_ne!(node_txn[0].input[0], node_txn[1].input[0]);
+ check_spends!(node_txn[2], chan_2.3.clone());
+ check_spends!(node_txn[3], node_txn[2].clone());
+ check_spends!(node_txn[4], node_txn[2].clone());
+ htlc_timeout_tx = node_txn[1].clone();
+ }
+
+ nodes[2].node.claim_funds(our_payment_preimage);
+ nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
+ check_added_monitors!(nodes[2], 2);
+ let events = nodes[2].node.get_and_clear_pending_msg_events();
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexepected event"),
+ }
+ let htlc_success_txn: Vec<_> = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+ assert_eq!(htlc_success_txn.len(), 5);
+ check_spends!(htlc_success_txn[2], chan_2.3.clone());
+ assert_eq!(htlc_success_txn[0], htlc_success_txn[3]);
+ assert_eq!(htlc_success_txn[0].input.len(), 1);
+ assert_eq!(htlc_success_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(htlc_success_txn[1], htlc_success_txn[4]);
+ assert_eq!(htlc_success_txn[1].input.len(), 1);
+ assert_eq!(htlc_success_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ assert_ne!(htlc_success_txn[0].input[0], htlc_success_txn[1].input[0]);
+ check_spends!(htlc_success_txn[0], commitment_txn[0].clone());
+ check_spends!(htlc_success_txn[1], commitment_txn[0].clone());
+
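+ // Confirm the extracted HTLC-Timeout tx on B's chain and let ANTI_REORG_DELAY blocks pass so
+ // that B fails the timed-out HTLC backwards to A.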
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![htlc_timeout_tx] }, 200);
+ connect_blocks(&nodes[1].chain_monitor, ANTI_REORG_DELAY - 1, 200, true, header.bitcoin_hash());
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(htlc_updates.update_add_htlcs.is_empty());
+ assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
+ assert_eq!(htlc_updates.update_fail_htlcs[0].htlc_id, 1);
+ assert!(htlc_updates.update_fulfill_htlcs.is_empty());
+ assert!(htlc_updates.update_fail_malformed_htlcs.is_empty());
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &htlc_updates.update_fail_htlcs[0]).unwrap();
+ assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
+ {
+ commitment_signed_dance!(nodes[0], nodes[1], &htlc_updates.commitment_signed, false, true);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::PaymentFailureNetworkUpdate { update: msgs::HTLCFailChannelUpdate::ChannelClosed { .. } } => {
+ },
+ _ => { panic!("Unexpected event"); }
+ }
+ }
+ let events = nodes[0].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::PaymentFailed { ref payment_hash, .. } => {
+ assert_eq!(*payment_hash, duplicate_payment_hash);
+ }
+ _ => panic!("Unexpected event"),
+ }
+
+ // Resolve the second HTLC by broadcasting C's HTLC-Success tx on B's chain
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![htlc_success_txn[0].clone()] }, 200);
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ assert_eq!(updates.update_fulfill_htlcs[0].htlc_id, 0);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ check_added_monitors!(nodes[1], 1);
+
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]).unwrap();
+ commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(*payment_preimage, our_payment_preimage);
+ }
+ _ => panic!("Unexpected event"),
+ }
+}
+
+#[test]
+fn test_dynamic_spendable_outputs_local_htlc_success_tx() {
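+ // B claims an HTLC on-chain via an HTLC-Success tx on its own commitment tx; B's
+ // ChannelMonitor should then provide a spendable output descriptor for the HTLC-Success output.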
+ let nodes = create_network(2, &[None, None]);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
+ let local_txn = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(local_txn[0].input.len(), 1);
+ check_spends!(local_txn[0], chan_1.3.clone());
+
+ // Give B knowledge of the preimage so it can generate a local HTLC-Success tx
+ nodes[1].node.claim_funds(payment_preimage);
+ check_added_monitors!(nodes[1], 1);
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![local_txn[0].clone()] }, 1);
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexepected event"),
+ }
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn[0].input.len(), 1);
+ assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ check_spends!(node_txn[0], local_txn[0].clone());
+
+ // Verify that B is able to spend its own HTLC-Success tx via the spendable output event given back by its ChannelMonitor
+ let spend_txn = check_spendable_outputs!(nodes[1], 1);
+ assert_eq!(spend_txn.len(), 2);
+ check_spends!(spend_txn[0], node_txn[0].clone());
+ check_spends!(spend_txn[1], node_txn[2].clone());
+}
+
+fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
+ // Test that we fail backwards the full set of HTLCs we need to when remote broadcasts an
+ // unrevoked commitment transaction.
+ // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
+ // a remote RAA before they could be failed backwards (and combinations thereof).
+ // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
+ // use the same payment hashes.
+ // Thus, we use a six-node network:
+ //
+ // A \ / E
+ // - C - D -
+ // B / \ F
+ // And test where C fails back to A/B when D announces its latest commitment transaction
+ let nodes = create_network(6, &[None, None, None, None, None, None]);
+
+ create_announced_chan_between_nodes(&nodes, 0, 2, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new());
+ let chan = create_announced_chan_between_nodes(&nodes, 2, 3, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 3, 4, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes(&nodes, 3, 5, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance and check output sanity...
+ send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
+ send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
+ assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn[0].output.len(), 2);
+
+ let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
+ // 0th HTLC:
+ let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+ // 1st HTLC:
+ let (_, payment_hash_2) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+ let route = nodes[1].router.get_route(&nodes[5].node.get_our_node_id(), None, &Vec::new(), ds_dust_limit*1000, TEST_FINAL_CLTV).unwrap();
+ // 2nd HTLC:
+ send_along_route_with_hash(&nodes[1], route.clone(), &[&nodes[2], &nodes[3], &nodes[5]], ds_dust_limit*1000, payment_hash_1); // not added < dust limit + HTLC tx fee
+ // 3rd HTLC:
+ send_along_route_with_hash(&nodes[1], route, &[&nodes[2], &nodes[3], &nodes[5]], ds_dust_limit*1000, payment_hash_2); // not added < dust limit + HTLC tx fee
+ // 4th HTLC:
+ let (_, payment_hash_3) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+ // 5th HTLC:
+ let (_, payment_hash_4) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+ let route = nodes[1].router.get_route(&nodes[5].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ // 6th HTLC:
+ send_along_route_with_hash(&nodes[1], route.clone(), &[&nodes[2], &nodes[3], &nodes[5]], 1000000, payment_hash_3);
+ // 7th HTLC:
+ send_along_route_with_hash(&nodes[1], route, &[&nodes[2], &nodes[3], &nodes[5]], 1000000, payment_hash_4);
+
+ // 8th HTLC:
+ let (_, payment_hash_5) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+ // 9th HTLC:
+ let route = nodes[1].router.get_route(&nodes[5].node.get_our_node_id(), None, &Vec::new(), ds_dust_limit*1000, TEST_FINAL_CLTV).unwrap();
+ send_along_route_with_hash(&nodes[1], route, &[&nodes[2], &nodes[3], &nodes[5]], ds_dust_limit*1000, payment_hash_5); // not added < dust limit + HTLC tx fee
+
+ // 10th HTLC:
+ let (_, payment_hash_6) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+ // 11th HTLC:
+ let route = nodes[1].router.get_route(&nodes[5].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ send_along_route_with_hash(&nodes[1], route, &[&nodes[2], &nodes[3], &nodes[5]], 1000000, payment_hash_6);
+
+ // Double-check that six of the new HTLCs were added
+ // We now have six HTLCs pending over the dust limit and six HTLCs under it (ie, together with
+ // the to_local and to_remote outputs the commitment tx has 8 outputs, with the six under-dust
+ // HTLCs not included).
+ assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.len(), 1);
+ assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn[0].output.len(), 8);
+
+ // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
+ // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
+ assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_1));
+ assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_3));
+ assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_5));
+ assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_6));
+ check_added_monitors!(nodes[4], 0);
+ expect_pending_htlcs_forwardable!(nodes[4]);
+ check_added_monitors!(nodes[4], 1);
+
+ let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
+ nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]).unwrap();
+ nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]).unwrap();
+ nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]).unwrap();
+ nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]).unwrap();
+ commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
+
+ // Fail 3rd below-dust and 7th above-dust HTLCs
+ assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_2));
+ assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_4));
+ check_added_monitors!(nodes[5], 0);
+ expect_pending_htlcs_forwardable!(nodes[5]);
+ check_added_monitors!(nodes[5], 1);
+
+ let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
+ nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]).unwrap();
+ nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]).unwrap();
+ commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
+
+ let ds_prev_commitment_tx = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+
+ expect_pending_htlcs_forwardable!(nodes[3]);
+ check_added_monitors!(nodes[3], 1);
+ let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]).unwrap();
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]).unwrap();
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]).unwrap();
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]).unwrap();
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]).unwrap();
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]).unwrap();
+ if deliver_last_raa {
+ commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
+ } else {
+ let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
+ }
+
+ // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
+ // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
+ // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
+ // propagated back to A/B yet (and D has two unrevoked commitment transactions).
+ //
+ // We now broadcast the latest commitment transaction, which *should* result in failures for
+ // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
+ // the non-broadcast above-dust HTLCs.
+ //
+ // Alternatively, we may broadcast the previous commitment transaction, which should only
+ // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
+ let ds_last_commitment_tx = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ if announce_latest {
+ nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&ds_last_commitment_tx[0]], &[1; 1]);
+ } else {
+ nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&ds_prev_commitment_tx[0]], &[1; 1]);
+ }
+ connect_blocks(&nodes[2].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
+ check_closed_broadcast!(nodes[2]);
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 2);
+
+ let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(cs_msgs.len(), 2);
+ let mut a_done = false;
+ for msg in cs_msgs {
+ match msg {
+ MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+ // Both under-dust HTLCs and the one above-dust HTLC that we had already failed
+ // should be failed-backwards here.
+ let target = if *node_id == nodes[0].node.get_our_node_id() {
+ // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
+ for htlc in &updates.update_fail_htlcs {
+ assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
+ }
+ assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
+ assert!(!a_done);
+ a_done = true;
+ &nodes[0]
+ } else {
+ // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
+ for htlc in &updates.update_fail_htlcs {
+ assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
+ }
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
+ &nodes[1]
+ };
+ target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+ target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]).unwrap();
+ target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]).unwrap();
+ if announce_latest {
+ target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]).unwrap();
+ if *node_id == nodes[0].node.get_our_node_id() {
+ target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]).unwrap();
+ }
+ }
+ commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+
+ let as_events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(as_events.len(), if announce_latest { 5 } else { 3 });
+ let mut as_failds = HashSet::new();
+ for event in as_events.iter() {
+ if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, .. } = event {
+ assert!(as_failds.insert(*payment_hash));
+ if *payment_hash != payment_hash_2 {
+ assert_eq!(*rejected_by_dest, deliver_last_raa);
+ } else {
+ assert!(!rejected_by_dest);
+ }
+ } else { panic!("Unexpected event"); }
+ }
+ assert!(as_failds.contains(&payment_hash_1));
+ assert!(as_failds.contains(&payment_hash_2));
+ if announce_latest {
+ assert!(as_failds.contains(&payment_hash_3));
+ assert!(as_failds.contains(&payment_hash_5));
+ }
+ assert!(as_failds.contains(&payment_hash_6));
+
+ let bs_events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(bs_events.len(), if announce_latest { 4 } else { 3 });
+ let mut bs_failds = HashSet::new();
+ for event in bs_events.iter() {
+ if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, .. } = event {
+ assert!(bs_failds.insert(*payment_hash));
+ if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
+ assert_eq!(*rejected_by_dest, deliver_last_raa);
+ } else {
+ assert!(!rejected_by_dest);
+ }
+ } else { panic!("Unexpected event"); }
+ }
+ assert!(bs_failds.contains(&payment_hash_1));
+ assert!(bs_failds.contains(&payment_hash_2));
+ if announce_latest {
+ assert!(bs_failds.contains(&payment_hash_4));
+ }
+ assert!(bs_failds.contains(&payment_hash_5));
+
+ // For each HTLC which was not failed-back by normal process (ie deliver_last_raa), we should
+ // get a PaymentFailureNetworkUpdate. A should have gotten 4 HTLCs which were failed-back due
+ // to unknown-preimage-etc, B should have gotten 2. Thus, in the
+ // announce_latest && deliver_last_raa case, we should have 5-4=1 and 4-2=2
+ // PaymentFailureNetworkUpdates.
+ let as_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(as_msg_events.len(), if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
+ let bs_msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(bs_msg_events.len(), if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
+ for event in as_msg_events.iter().chain(bs_msg_events.iter()) {
+ match event {
+ &MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ }
+}
+
+#[test]
+fn test_fail_backwards_latest_remote_announce_a() {
+ do_test_fail_backwards_unrevoked_remote_announce(false, true);
+}
+
+#[test]
+fn test_fail_backwards_latest_remote_announce_b() {
+ do_test_fail_backwards_unrevoked_remote_announce(true, true);
+}
+
+#[test]
+fn test_fail_backwards_previous_remote_announce() {
+ do_test_fail_backwards_unrevoked_remote_announce(false, false);
+ // Note that true, true doesn't make sense as it implies we announce a revoked state, which is
+ // tested for in test_commitment_revoked_fail_backward_exhaustive()
+}
+
+#[test]
+fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
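+ // A times out an HTLC on-chain via an HTLC-Timeout tx on its own commitment tx; A's
+ // ChannelMonitor should then provide a spendable output descriptor for the HTLC-Timeout output.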
+ let nodes = create_network(2, &[None, None]);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
+ let local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(local_txn[0].input.len(), 1);
+ check_spends!(local_txn[0], chan_1.3.clone());
+
+ // Time out the HTLC on A's chain so it can generate an HTLC-Timeout tx
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![local_txn[0].clone()] }, 200);
+ check_closed_broadcast!(nodes[0]);
+
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn[0].input.len(), 1);
+ assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ check_spends!(node_txn[0], local_txn[0].clone());
+
+ // Verify that A is able to spend its own HTLC-Timeout tx via the spendable output event given back by its ChannelMonitor
+ let spend_txn = check_spendable_outputs!(nodes[0], 1);
+ assert_eq!(spend_txn.len(), 8);
+ assert_eq!(spend_txn[0], spend_txn[2]);
+ assert_eq!(spend_txn[0], spend_txn[4]);
+ assert_eq!(spend_txn[0], spend_txn[6]);
+ assert_eq!(spend_txn[1], spend_txn[3]);
+ assert_eq!(spend_txn[1], spend_txn[5]);
+ assert_eq!(spend_txn[1], spend_txn[7]);
+ check_spends!(spend_txn[0], local_txn[0].clone());
+ check_spends!(spend_txn[1], node_txn[0].clone());
+}
+
+#[test]
+fn test_static_output_closing_tx() {
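+ // After a cooperative close, both nodes should receive a spendable output descriptor for their
+ // output on the closing tx.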
+ let nodes = create_network(2, &[None, None]);
+
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+ let closing_tx = close_channel(&nodes[0], &nodes[1], &chan.2, chan.3, true).2;
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![closing_tx.clone()] }, 1);
+ let spend_txn = check_spendable_outputs!(nodes[0], 2);
+ assert_eq!(spend_txn.len(), 1);
+ check_spends!(spend_txn[0], closing_tx.clone());
+
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![closing_tx.clone()] }, 1);
+ let spend_txn = check_spendable_outputs!(nodes[1], 2);
+ assert_eq!(spend_txn.len(), 1);
+ check_spends!(spend_txn[0], closing_tx);
+}
+
+fn do_htlc_claim_local_commitment_only(use_dust: bool) {
+ let nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 });
+
+ // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
+ // present in B's local commitment transaction, but none of A's commitment transactions.
+ assert!(nodes[1].node.claim_funds(our_payment_preimage));
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]).unwrap();
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentSent { payment_preimage } => {
+ assert_eq!(payment_preimage, our_payment_preimage);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ for i in 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + CHAN_CONFIRM_DEPTH + 1 {
+ nodes[1].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
+ header.prev_blockhash = header.bitcoin_hash();
+ }
+ test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
+ check_closed_broadcast!(nodes[1]);
+}
+
+fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
+ let mut nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), if use_dust { 50000 } else { 3000000 }, TEST_FINAL_CLTV).unwrap();
+ let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ // As far as A is concerned, the HTLC is now present only in the latest remote commitment
+ // transaction; it is not in A's latest local commitment, so A can just broadcast its local
+ // commitment to "time out" the HTLC.
+
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ for i in 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
+ nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
+ header.prev_blockhash = header.bitcoin_hash();
+ }
+ test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
+ check_closed_broadcast!(nodes[0]);
+}
+
+fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
+ let nodes = create_network(3, &[None, None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
+ // in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions.
+ // Also optionally test that we *don't* fail the channel in case the commitment transaction was
+ // actually revoked.
+ let htlc_value = if use_dust { 50000 } else { 3000000 };
+ let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
+ assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash));
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+ if check_revoke_no_close {
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ }
+
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ for i in 1..TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
+ nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
+ header.prev_blockhash = header.bitcoin_hash();
+ }
+ if !check_revoke_no_close {
+ test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
+ check_closed_broadcast!(nodes[0]);
+ } else {
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
+ assert_eq!(payment_hash, our_payment_hash);
+ assert!(rejected_by_dest);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+}
+
+// Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
+// There are only a few cases to test here:
+// * it's not really normative behavior, but we test that below-dust HTLCs "included" in
+// broadcastable commitment transactions result in channel closure,
+// * it's included in an unrevoked-but-previous remote commitment transaction,
+// * it's included in the latest remote or local commitment transactions.
+// We test each of the three possible commitment transactions individually and use both dust and
+// non-dust HTLCs.
+// Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
+// assume they are handled the same across all six cases, as both outbound and inbound failures are
+// tested for at least one of the cases in other tests.
+#[test]
+fn htlc_claim_single_commitment_only_a() {
+ do_htlc_claim_local_commitment_only(true);
+ do_htlc_claim_local_commitment_only(false);
+
+ do_htlc_claim_current_remote_commitment_only(true);
+ do_htlc_claim_current_remote_commitment_only(false);
+}
+
+#[test]
+fn htlc_claim_single_commitment_only_b() {
+ do_htlc_claim_previous_remote_commitment_only(true, false);
+ do_htlc_claim_previous_remote_commitment_only(false, false);
+ do_htlc_claim_previous_remote_commitment_only(true, true);
+ do_htlc_claim_previous_remote_commitment_only(false, true);
+}
+
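+// Sends a payment along the given route, using callback_msg to tamper with the update_add_htlc
+// message and callback_node to trigger node-side actions, then checks the resulting PaymentFailed
+// event (retryability and error code) and any PaymentFailureNetworkUpdate against expectations.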
+fn run_onion_failure_test<F1,F2>(_name: &str, test_case: u8, nodes: &Vec<Node>, route: &Route, payment_hash: &PaymentHash, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option<u16>, expected_channel_update: Option<HTLCFailChannelUpdate>)
+ where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC),
+ F2: FnMut(),
+{
+ run_onion_failure_test_with_fail_intercept(_name, test_case, nodes, route, payment_hash, callback_msg, |_|{}, callback_node, expected_retryable, expected_error_code, expected_channel_update);
+}
+
+// test_case
+// 0: node1 fails backward
+// 1: final node fails backward
+// 2: payment completed but the user rejects the payment
+// 3: final node fails backward (but tamper with the onion payloads from node0)
+// 100: trigger an error in the intermediate node and tamper with the returned fail_htlc
+// 200: trigger an error in the final node and tamper with the returned fail_htlc
+fn run_onion_failure_test_with_fail_intercept<F1,F2,F3>(_name: &str, test_case: u8, nodes: &Vec<Node>, route: &Route, payment_hash: &PaymentHash, mut callback_msg: F1, mut callback_fail: F2, mut callback_node: F3, expected_retryable: bool, expected_error_code: Option<u16>, expected_channel_update: Option<HTLCFailChannelUpdate>)
+ where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC),
+ F2: for <'a> FnMut(&'a mut msgs::UpdateFailHTLC),
+ F3: FnMut(),
+{
+
+ // reset block height
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ for ix in 0..nodes.len() {
+ nodes[ix].chain_monitor.block_connected_checked(&header, 1, &Vec::new()[..], &[0; 0]);
+ }
+
+ macro_rules! expect_event {
+ ($node: expr, $event_type: path) => {{
+ let events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ $event_type { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ }}
+ }
+
+ macro_rules! expect_htlc_forward {
+ ($node: expr) => {{
+ expect_event!($node, Event::PendingHTLCsForwardable);
+ $node.node.process_pending_htlc_forwards();
+ }}
+ }
+
+ // 0 ~~> 2 send payment
+ nodes[0].node.send_payment(route.clone(), payment_hash.clone()).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let update_0 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ // tamper with update_add (0 => 1)
+ let mut update_add_0 = update_0.update_add_htlcs[0].clone();
+ if test_case == 0 || test_case == 3 || test_case == 100 {
+ callback_msg(&mut update_add_0);
+ callback_node();
+ }
+ // 0 => 1 update_add & CS
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add_0).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[0], &update_0.commitment_signed, false, true);
+
+ let update_1_0 = match test_case {
+ 0|100 => { // intermediate node failure; fail backward to 0
+ let update_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(update_1_0.update_fail_htlcs.len()+update_1_0.update_fail_malformed_htlcs.len()==1 && (update_1_0.update_fail_htlcs.len()==1 || update_1_0.update_fail_malformed_htlcs.len()==1));
+ update_1_0
+ },
+ 1|2|3|200 => { // final node failure; forwarding to 2
+ assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+ // forwarding on 1
+ if test_case != 200 {
+ callback_node();
+ }
+ expect_htlc_forward!(&nodes[1]);
+
+ let update_1 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+ check_added_monitors!(&nodes[1], 1);
+ assert_eq!(update_1.update_add_htlcs.len(), 1);
+ // tamper with update_add (1 => 2)
+ let mut update_add_1 = update_1.update_add_htlcs[0].clone();
+ if test_case != 3 && test_case != 200 {
+ callback_msg(&mut update_add_1);
+ }
+
+ // 1 => 2
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add_1).unwrap();
+ commitment_signed_dance!(nodes[2], nodes[1], update_1.commitment_signed, false, true);
+
+ if test_case == 2 || test_case == 200 {
+ expect_htlc_forward!(&nodes[2]);
+ expect_event!(&nodes[2], Event::PaymentReceived);
+ callback_node();
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ }
+
+ let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ if test_case == 2 || test_case == 200 {
+ check_added_monitors!(&nodes[2], 1);
+ }
+ assert!(update_2_1.update_fail_htlcs.len() == 1);
+
+ let mut fail_msg = update_2_1.update_fail_htlcs[0].clone();
+ if test_case == 200 {
+ callback_fail(&mut fail_msg);
+ }
+
+ // 2 => 1
+ nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_msg).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[2], update_2_1.commitment_signed, true);
+
+ // backward fail on 1
+ let update_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert!(update_1_0.update_fail_htlcs.len() == 1);
+ update_1_0
+ },
+ _ => unreachable!(),
+ };
+
+ // 1 => 0 commitment_signed_dance
+ if update_1_0.update_fail_htlcs.len() > 0 {
+ let mut fail_msg = update_1_0.update_fail_htlcs[0].clone();
+ if test_case == 100 {
+ callback_fail(&mut fail_msg);
+ }
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg).unwrap();
+ } else {
+ nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_1_0.update_fail_malformed_htlcs[0]).unwrap();
+ };
+
+ commitment_signed_dance!(nodes[0], nodes[1], update_1_0.commitment_signed, false, true);
+
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ if let &Event::PaymentFailed { payment_hash:_, ref rejected_by_dest, ref error_code } = &events[0] {
+ assert_eq!(*rejected_by_dest, !expected_retryable);
+ assert_eq!(*error_code, expected_error_code);
+ } else {
+ panic!("Uexpected event");
+ }
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ if expected_channel_update.is_some() {
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::PaymentFailureNetworkUpdate { ref update } => {
+ match update {
+ &HTLCFailChannelUpdate::ChannelUpdateMessage { .. } => {
+ if let HTLCFailChannelUpdate::ChannelUpdateMessage { .. } = expected_channel_update.unwrap() {} else {
+ panic!("channel_update not found!");
+ }
+ },
+ &HTLCFailChannelUpdate::ChannelClosed { ref short_channel_id, ref is_permanent } => {
+ if let HTLCFailChannelUpdate::ChannelClosed { short_channel_id: ref expected_short_channel_id, is_permanent: ref expected_is_permanent } = expected_channel_update.unwrap() {
+ assert!(*short_channel_id == *expected_short_channel_id);
+ assert!(*is_permanent == *expected_is_permanent);
+ } else {
+ panic!("Unexpected message event");
+ }
+ },
+ &HTLCFailChannelUpdate::NodeFailure { ref node_id, ref is_permanent } => {
+ if let HTLCFailChannelUpdate::NodeFailure { node_id: ref expected_node_id, is_permanent: ref expected_is_permanent } = expected_channel_update.unwrap() {
+ assert!(*node_id == *expected_node_id);
+ assert!(*is_permanent == *expected_is_permanent);
+ } else {
+ panic!("Unexpected message event");
+ }
+ },
+ }
+ },
+ _ => panic!("Unexpected message event"),
+ }
+ } else {
+ assert_eq!(events.len(), 0);
+ }
+}
+
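+// Builds a structurally-valid but all-zero ChannelUpdate, used below as the channel_update
+// payload embedded in simulated UPDATE-type onion failure messages.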
+impl msgs::ChannelUpdate {
+ fn dummy() -> msgs::ChannelUpdate {
+ use secp256k1::ffi::Signature as FFISignature;
+ use secp256k1::Signature;
+ msgs::ChannelUpdate {
+ signature: Signature::from(FFISignature::new()),
+ contents: msgs::UnsignedChannelUpdate {
+ chain_hash: Sha256dHash::hash(&vec![0u8][..]),
+ short_channel_id: 0,
+ timestamp: 0,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: vec![],
+ }
+ }
+ }
+}
+
+#[test]
+fn test_onion_failure() {
+ use ln::msgs::ChannelUpdate;
+ use ln::channelmanager::CLTV_FAR_FAR_AWAY;
+ use secp256k1;
+
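+ // BOLT 4 onion failure code flag bits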
+ const BADONION: u16 = 0x8000;
+ const PERM: u16 = 0x4000;
+ const NODE: u16 = 0x2000;
+ const UPDATE: u16 = 0x1000;
+
+ let mut nodes = create_network(3, &[None, None, None]);
+ for node in nodes.iter() {
+ *node.keys_manager.override_session_priv.lock().unwrap() = Some(SecretKey::from_slice(&[3; 32]).unwrap());
+ }
+ let channels = [create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new()), create_announced_chan_between_nodes(&nodes, 1, 2, LocalFeatures::new(), LocalFeatures::new())];
+ let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 40000, TEST_FINAL_CLTV).unwrap();
+ // positive case
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 40000);
+
+ // intermediate node failure
+ run_onion_failure_test("invalid_realm", 0, &nodes, &route, &payment_hash, |msg| {
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
+ let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
+ onion_payloads[0].realm = 3;
+ msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
+ }, ||{}, true, Some(PERM|1), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));//XXX incremented channels idx here
+
+ // final node failure
+ run_onion_failure_test("invalid_realm", 3, &nodes, &route, &payment_hash, |msg| {
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
+ let (mut onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
+ onion_payloads[1].realm = 3;
+ msg.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
+ }, ||{}, false, Some(PERM|1), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));
+
+ // the following three failure types use run_onion_failure_test_with_fail_intercept() to test
+ // only the origin node's handling of simulated fail messages
+ // intermediate node failure
+ run_onion_failure_test_with_fail_intercept("temporary_node_failure", 100, &nodes, &route, &payment_hash, |msg| {
+ // trigger error
+ msg.amount_msat -= 1;
+ }, |msg| {
+ // and tamper returning error message
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
+ msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], NODE|2, &[0;0]);
+ }, ||{}, true, Some(NODE|2), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[0].pubkey, is_permanent: false}));
+
+ // final node failure
+ run_onion_failure_test_with_fail_intercept("temporary_node_failure", 200, &nodes, &route, &payment_hash, |_msg| {}, |msg| {
+ // and tamper returning error message
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
+ msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], NODE|2, &[0;0]);
+ }, ||{
+ nodes[2].node.fail_htlc_backwards(&payment_hash);
+ }, true, Some(NODE|2), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[1].pubkey, is_permanent: false}));
+
+ // intermediate node failure
+ run_onion_failure_test_with_fail_intercept("permanent_node_failure", 100, &nodes, &route, &payment_hash, |msg| {
+ msg.amount_msat -= 1;
+ }, |msg| {
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
+ msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], PERM|NODE|2, &[0;0]);
+ }, ||{}, true, Some(PERM|NODE|2), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[0].pubkey, is_permanent: true}));
+
+ // final node failure
+ run_onion_failure_test_with_fail_intercept("permanent_node_failure", 200, &nodes, &route, &payment_hash, |_msg| {}, |msg| {
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
+ msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], PERM|NODE|2, &[0;0]);
+ }, ||{
+ nodes[2].node.fail_htlc_backwards(&payment_hash);
+ }, false, Some(PERM|NODE|2), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[1].pubkey, is_permanent: true}));
+
+ // intermediate node failure
+ run_onion_failure_test_with_fail_intercept("required_node_feature_missing", 100, &nodes, &route, &payment_hash, |msg| {
+ msg.amount_msat -= 1;
+ }, |msg| {
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
+ msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], PERM|NODE|3, &[0;0]);
+ }, ||{
+ nodes[2].node.fail_htlc_backwards(&payment_hash);
+ }, true, Some(PERM|NODE|3), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[0].pubkey, is_permanent: true}));
+
+ // final node failure
+ run_onion_failure_test_with_fail_intercept("required_node_feature_missing", 200, &nodes, &route, &payment_hash, |_msg| {}, |msg| {
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
+ msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[1].shared_secret[..], PERM|NODE|3, &[0;0]);
+ }, ||{
+ nodes[2].node.fail_htlc_backwards(&payment_hash);
+ }, false, Some(PERM|NODE|3), Some(msgs::HTLCFailChannelUpdate::NodeFailure{node_id: route.hops[1].pubkey, is_permanent: true}));
+
+ run_onion_failure_test("invalid_onion_version", 0, &nodes, &route, &payment_hash, |msg| { msg.onion_routing_packet.version = 1; }, ||{}, true,
+ Some(BADONION|PERM|4), None);
+
+ run_onion_failure_test("invalid_onion_hmac", 0, &nodes, &route, &payment_hash, |msg| { msg.onion_routing_packet.hmac = [3; 32]; }, ||{}, true,
+ Some(BADONION|PERM|5), None);
+
+ run_onion_failure_test("invalid_onion_key", 0, &nodes, &route, &payment_hash, |msg| { msg.onion_routing_packet.public_key = Err(secp256k1::Error::InvalidPublicKey);}, ||{}, true,
+ Some(BADONION|PERM|6), None);
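+ // Codes 4, 5 and 6 above are BADONION failures: the processing node cannot decrypt the onion at
+ // all, so it replies with update_fail_malformed_htlc instead of an encrypted failure packet.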
+
+ run_onion_failure_test_with_fail_intercept("temporary_channel_failure", 100, &nodes, &route, &payment_hash, |msg| {
+ msg.amount_msat -= 1;
+ }, |msg| {
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
+ msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], UPDATE|7, &ChannelUpdate::dummy().encode_with_len()[..]);
+ }, ||{}, true, Some(UPDATE|7), Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy()}));
+
+ run_onion_failure_test_with_fail_intercept("permanent_channel_failure", 100, &nodes, &route, &payment_hash, |msg| {
+ msg.amount_msat -= 1;
+ }, |msg| {
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
+ msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], PERM|8, &[0;0]);
+ // short_channel_id from the processing node
+ }, ||{}, true, Some(PERM|8), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));
+
+ run_onion_failure_test_with_fail_intercept("required_channel_feature_missing", 100, &nodes, &route, &payment_hash, |msg| {
+ msg.amount_msat -= 1;
+ }, |msg| {
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
+ msg.reason = onion_utils::build_first_hop_failure_packet(&onion_keys[0].shared_secret[..], PERM|9, &[0;0]);
+ // short_channel_id from the processing node
+ }, ||{}, true, Some(PERM|9), Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: channels[1].0.contents.short_channel_id, is_permanent: true}));
+
+ let mut bogus_route = route.clone();
+ bogus_route.hops[1].short_channel_id -= 1;
+ run_onion_failure_test("unknown_next_peer", 0, &nodes, &bogus_route, &payment_hash, |_| {}, ||{}, true, Some(PERM|10),
+ Some(msgs::HTLCFailChannelUpdate::ChannelClosed{short_channel_id: bogus_route.hops[1].short_channel_id, is_permanent:true}));
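+ // nodes[1] has no channel matching the decremented short_channel_id, so it cannot resolve the
+ // next hop and fails with PERM|10 (unknown_next_peer).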
+
+ let amt_to_forward = nodes[1].node.channel_state.lock().unwrap().by_id.get(&channels[1].2).unwrap().get_their_htlc_minimum_msat() - 1;
+ let mut bogus_route = route.clone();
+ let route_len = bogus_route.hops.len();
+ bogus_route.hops[route_len-1].fee_msat = amt_to_forward;
+ run_onion_failure_test("amount_below_minimum", 0, &nodes, &bogus_route, &payment_hash, |_| {}, ||{}, true, Some(UPDATE|11), Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy()}));
+
+ //TODO: with new config API, we will be able to generate both valid and
+ //invalid channel_update cases.
+ run_onion_failure_test("fee_insufficient", 0, &nodes, &route, &payment_hash, |msg| {
+ msg.amount_msat -= 1;
+ }, || {}, true, Some(UPDATE|12), Some(msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id: channels[0].0.contents.short_channel_id, is_permanent: true}));
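+ // Underpaying by one msat leaves nodes[1] unable to collect its advertised fee when forwarding,
+ // so it fails the HTLC with UPDATE|12 (fee_insufficient).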
+
+ run_onion_failure_test("incorrect_cltv_expiry", 0, &nodes, &route, &payment_hash, |msg| {
+ // need to violate: cltv_expiry - cltv_expiry_delta >= outgoing_cltv_value
+ msg.cltv_expiry -= 1;
+ }, || {}, true, Some(UPDATE|13), Some(msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id: channels[0].0.contents.short_channel_id, is_permanent: true}));
+
+ run_onion_failure_test("expiry_too_soon", 0, &nodes, &route, &payment_hash, |msg| {
+ let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS + 1;
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_checked(&header, height, &Vec::new()[..], &[0; 0]);
+ }, ||{}, true, Some(UPDATE|14), Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy()}));
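+ // Advancing nodes[1]'s chain until the HTLC expires within CLTV_CLAIM_BUFFER plus the grace
+ // period makes the forwarding node reject it (UPDATE|14); the final-node variant below (code 17)
+ // plays the same trick against nodes[2].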
+
+ run_onion_failure_test("unknown_payment_hash", 2, &nodes, &route, &payment_hash, |_| {}, || {
+ nodes[2].node.fail_htlc_backwards(&payment_hash);
+ }, false, Some(PERM|15), None);
+
+ run_onion_failure_test("final_expiry_too_soon", 1, &nodes, &route, &payment_hash, |msg| {
+ let height = msg.cltv_expiry - CLTV_CLAIM_BUFFER - LATENCY_GRACE_PERIOD_BLOCKS + 1;
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[2].chain_monitor.block_connected_checked(&header, height, &Vec::new()[..], &[0; 0]);
+ }, || {}, true, Some(17), None);
+
+ run_onion_failure_test("final_incorrect_cltv_expiry", 1, &nodes, &route, &payment_hash, |_| {}, || {
+ for (_, pending_forwards) in nodes[1].node.channel_state.lock().unwrap().borrow_parts().forward_htlcs.iter_mut() {
+ for f in pending_forwards.iter_mut() {
+ match f {
+ &mut HTLCForwardInfo::AddHTLC { ref mut forward_info, .. } =>
+ forward_info.outgoing_cltv_value += 1,
+ _ => {},
+ }
+ }
+ }
+ }, true, Some(18), None);
+
+ run_onion_failure_test("final_incorrect_htlc_amount", 1, &nodes, &route, &payment_hash, |_| {}, || {
+ // violate amt_to_forward > msg.amount_msat
+ for (_, pending_forwards) in nodes[1].node.channel_state.lock().unwrap().borrow_parts().forward_htlcs.iter_mut() {
+ for f in pending_forwards.iter_mut() {
+ match f {
+ &mut HTLCForwardInfo::AddHTLC { ref mut forward_info, .. } =>
+ forward_info.amt_to_forward -= 1,
+ _ => {},
+ }
+ }
+ }
+ }, true, Some(19), None);
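+ // For codes 18 and 19 we corrupt nodes[1]'s pending-forward state rather than the wire message,
+ // so the values actually forwarded no longer match what the onion promised the final node.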
+
+ run_onion_failure_test("channel_disabled", 0, &nodes, &route, &payment_hash, |_| {}, || {
+ // disconnect the channel between nodes[1] and nodes[2]
+ nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false);
+ nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ }, true, Some(UPDATE|20), Some(msgs::HTLCFailChannelUpdate::ChannelUpdateMessage{msg: ChannelUpdate::dummy()}));
+ reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ run_onion_failure_test("expiry_too_far", 0, &nodes, &route, &payment_hash, |msg| {
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let mut route = route.clone();
+ let height = 1;
+ route.hops[1].cltv_expiry_delta += CLTV_FAR_FAR_AWAY + route.hops[0].cltv_expiry_delta + 1;
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route, &session_priv).unwrap();
+ let (onion_payloads, _, htlc_cltv) = onion_utils::build_onion_payloads(&route, height).unwrap();
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &payment_hash);
+ msg.cltv_expiry = htlc_cltv;
+ msg.onion_routing_packet = onion_packet;
+ }, ||{}, true, Some(21), None);
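+ // Inflating hop 1's cltv_expiry_delta past CLTV_FAR_FAR_AWAY yields an outgoing expiry
+ // unreasonably far in the future, which nodes[1] rejects with code 21 (expiry_too_far).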
+}
+
+#[test]
+#[should_panic]
+fn bolt2_open_channel_sending_node_checks_part1() { //This test needs to be on its own as we are catching a panic
+ let nodes = create_network(2, &[None, None]);
+ //Force duplicate channel ids
+ for node in nodes.iter() {
+ *node.keys_manager.override_channel_id_priv.lock().unwrap() = Some([0; 32]);
+ }
+
+ // BOLT #2 spec: Sending node must ensure temporary_channel_id is unique from any other channel ID with the same peer.
+ let channel_value_satoshis=10000;
+ let push_msat=10001;
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).unwrap();
+ let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &node0_to_1_send_open_channel).unwrap();
+
+ //Create a second channel with a channel_id collision
+ assert!(nodes[0].node.create_channel(nodes[0].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).is_err());
+}
+
+#[test]
+fn bolt2_open_channel_sending_node_checks_part2() {
+ let nodes = create_network(2, &[None, None]);
+
+ // BOLT #2 spec: Sending node must set funding_satoshis to less than 2^24 satoshis
+ let channel_value_satoshis=1 << 24; // 2^24 (note: `^` is XOR in Rust, not exponentiation)
+ let push_msat=10001;
+ assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).is_err());
+
+ // BOLT #2 spec: Sending node must set push_msat to equal or less than 1000 * funding_satoshis
+ let channel_value_satoshis=10000;
+ // Test when push_msat exceeds 1000 * funding_satoshis.
+ let push_msat=1000*channel_value_satoshis+1;
+ assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).is_err());
+
+ // BOLT #2 spec: Sending node must set channel_reserve_satoshis greater than or equal to dust_limit_satoshis
+ let channel_value_satoshis=10000;
+ let push_msat=10001;
+ assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), channel_value_satoshis, push_msat, 42).is_ok()); //Create a valid channel
+ let node0_to_1_send_open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ assert!(node0_to_1_send_open_channel.channel_reserve_satoshis>=node0_to_1_send_open_channel.dust_limit_satoshis);
+
+ // BOLT #2 spec: Sending node must set undefined bits in channel_flags to 0
+ // Only the least-significant bit of channel_flags is currently defined, so channel_flags can only take one of two values: 0 or 1
+ assert!(node0_to_1_send_open_channel.channel_flags<=1);
+
+ // BOLT #2 spec: Sending node should set to_self_delay sufficient to ensure the sender can irreversibly spend a commitment transaction output, in case of misbehaviour by the receiver.
+ assert!(BREAKDOWN_TIMEOUT>0);
+ assert!(node0_to_1_send_open_channel.to_self_delay==BREAKDOWN_TIMEOUT);
+
+ // BOLT #2 spec: Sending node must ensure the chain_hash value identifies the chain it wishes to open the channel within.
+ let chain_hash=genesis_block(Network::Testnet).header.bitcoin_hash();
+ assert_eq!(node0_to_1_send_open_channel.chain_hash,chain_hash);
+
+ // BOLT #2 spec: Sending node must set funding_pubkey, revocation_basepoint, htlc_basepoint, payment_basepoint, and delayed_payment_basepoint to valid DER-encoded, compressed, secp256k1 pubkeys.
+ assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.funding_pubkey.serialize()).is_ok());
+ assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.revocation_basepoint.serialize()).is_ok());
+ assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.htlc_basepoint.serialize()).is_ok());
+ assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.payment_basepoint.serialize()).is_ok());
+ assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
+}
+
+// BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
+// BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
+//TODO: This does not appear to be explicitly enforced when sending an HTLC, but as the fee aspects of the BOLT specs are in flux, we leave this as a TODO.
+
+#[test]
+fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
+ //BOLT2 Requirement: MUST offer amount_msat greater than 0.
+ //BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
+ let mut nodes = create_network(2, &[None, None]);
+ let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
+ let mut route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+
+ route.hops[0].fee_msat = 0;
+
+ let err = nodes[0].node.send_payment(route, our_payment_hash);
+
+ if let Err(APIError::ChannelUnavailable{err}) = err {
+ assert_eq!(err, "Cannot send less than their minimum HTLC value");
+ } else {
+ assert!(false);
+ }
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
+ //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
+ //It is enforced when constructing a route.
+ let mut nodes = create_network(2, &[None, None]);
+ let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 0, LocalFeatures::new(), LocalFeatures::new());
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000000, 500000001).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+
+ let err = nodes[0].node.send_payment(route, our_payment_hash);
+
+ if let Err(APIError::RouteError{err}) = err {
+ assert_eq!(err, "Channel CLTV overflowed?!");
+ } else {
+ assert!(false);
+ }
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
+ //BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
+ //BOLT 2 Requirement: for the first HTLC it offers MUST set id to 0.
+ //BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
+ let mut nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, LocalFeatures::new(), LocalFeatures::new());
+ let max_accepted_htlcs = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().their_max_accepted_htlcs as u64;
+
+ for i in 0..max_accepted_htlcs {
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ let payment_event = {
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ if let MessageSendEvent::UpdateHTLCs { node_id: _, updates: msgs::CommitmentUpdate{ update_add_htlcs: ref htlcs, .. }, } = events[0] {
+ assert_eq!(htlcs[0].htlc_id, i);
+ } else {
+ assert!(false);
+ }
+ SendEvent::from_event(events.remove(0))
+ };
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ check_added_monitors!(nodes[1], 0);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_payment_received!(nodes[1], our_payment_hash, 100000);
+ }
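+ // All their_max_accepted_htlcs HTLCs are now pending, so the next send must be refused locally,
+ // before anything is put on the wire.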
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ let err = nodes[0].node.send_payment(route, our_payment_hash);
+
+ if let Err(APIError::ChannelUnavailable{err}) = err {
+ assert_eq!(err, "Cannot push more than their max accepted HTLCs");
+ } else {
+ assert!(false);
+ }
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() {
+ //BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
+ let mut nodes = create_network(2, &[None, None]);
+ let channel_value = 100000;
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, 0, LocalFeatures::new(), LocalFeatures::new());
+ let max_in_flight = get_channel_value_stat!(nodes[0], chan.2).their_max_htlc_value_in_flight_msat;
+
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight);
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], max_in_flight+1, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ let err = nodes[0].node.send_payment(route, our_payment_hash);
+
+ if let Err(APIError::ChannelUnavailable{err}) = err {
+ assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept");
+ } else {
+ assert!(false);
+ }
+
+ send_payment(&nodes[0], &[&nodes[1]], max_in_flight);
+}
+
+// BOLT 2 Requirements for the Receiver when handling an update_add_htlc message.
+#[test]
+fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
+ //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
+ let mut nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
+ let htlc_minimum_msat: u64;
+ {
+ let chan_lock = nodes[0].node.channel_state.lock().unwrap();
+ let channel = chan_lock.by_id.get(&chan.2).unwrap();
+ htlc_minimum_msat = channel.get_our_htlc_minimum_msat();
+ }
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], htlc_minimum_msat, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
- if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
++ if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
+ assert_eq!(err, "Remote side tried to send less than our minimum HTLC value");
+ } else {
+ assert!(false);
+ }
+ assert!(nodes[1].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[1]);
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
+ //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
+ let mut nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
+
+ let their_channel_reserve = get_channel_value_stat!(nodes[0], chan.2).channel_reserve_msat;
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 5000000-their_channel_reserve, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
+ updates.update_add_htlcs[0].amount_msat = 5000000-their_channel_reserve+1;
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
- if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
++ if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
+ assert_eq!(err, "Remote HTLC add would put them over their reserve value");
+ } else {
+ assert!(false);
+ }
+
+ assert!(nodes[1].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[1]);
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
+ //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
+ //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
+ let mut nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 3999999, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+
+ let session_priv = SecretKey::from_slice(&{
+ let mut session_key = [0; 32];
+ let mut rng = thread_rng();
+ rng.fill_bytes(&mut session_key);
+ session_key
+ }).expect("RNG is bad!");
+
+ let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::signing_only(), &route, &session_priv).unwrap();
+ let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+
+ let mut msg = msgs::UpdateAddHTLC {
+ channel_id: chan.2,
+ htlc_id: 0,
+ amount_msat: 1000,
+ payment_hash: our_payment_hash,
+ cltv_expiry: htlc_cltv,
+ onion_routing_packet: onion_packet.clone(),
+ };
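+ // Injecting raw update_add_htlc messages bypasses nodes[0]'s own sender-side limit checks,
+ // letting us push nodes[1] exactly one HTLC past OUR_MAX_HTLCS.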
+
+ for i in 0..super::channel::OUR_MAX_HTLCS {
+ msg.htlc_id = i as u64;
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).unwrap();
+ }
+ msg.htlc_id = (super::channel::OUR_MAX_HTLCS) as u64;
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
+
- if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
++ if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
+ assert_eq!(err, "Remote tried to push more than our max accepted HTLCs");
+ } else {
+ assert!(false);
+ }
+
+ assert!(nodes[1].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[1]);
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
+ //BOLT 2 Requirement: if a sending node adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
+ let mut nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ updates.update_add_htlcs[0].amount_msat = get_channel_value_stat!(nodes[1], chan.2).their_max_htlc_value_in_flight_msat + 1;
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
- if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
++ if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
+ assert_eq!(err,"Remote HTLC add would put them over our max HTLC value");
+ } else {
+ assert!(false);
+ }
+
+ assert!(nodes[1].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[1]);
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
+ //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, LocalFeatures::new(), LocalFeatures::new());
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 3999999, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ updates.update_add_htlcs[0].cltv_expiry = 500000000;
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
- if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
++ if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
+ assert_eq!(err,"Remote provided CLTV expiry in seconds instead of block height");
+ } else {
+ assert!(false);
+ }
+
+ assert!(nodes[1].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[1]);
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
+ //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
+ // We test this by first checking that repeated HTLCs pass commitment signature checks after a
+ // disconnect, and then that non-sequential htlc_ids result in a channel failure.
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
+
+ //Disconnect and Reconnect
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+ assert_eq!(reestablish_1.len(), 1);
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+ let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+ assert_eq!(reestablish_2.len(), 1);
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+ handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+ handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+ //Resend HTLC
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
+ assert_eq!(updates.commitment_signed.htlc_signatures.len(), 1);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let _bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
- if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
++ if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
+ assert_eq!(err, "Remote skipped HTLC ID");
+ } else {
+ assert!(false);
+ }
+
+ assert!(nodes[1].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[1]);
+}
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() {
+ //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
+
+ let mut nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
+ let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
+
+ let update_msg = msgs::UpdateFulfillHTLC{
+ channel_id: chan.2,
+ htlc_id: 0,
+ payment_preimage: our_payment_preimage,
+ };
+
+ let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
+
- if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
++ if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
+ assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
+ } else {
+ assert!(false);
+ }
+
+ assert!(nodes[0].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[0]);
+}
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() {
+ //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
+
+ let mut nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
+
+ let update_msg = msgs::UpdateFailHTLC{
+ channel_id: chan.2,
+ htlc_id: 0,
+ reason: msgs::OnionErrorPacket { data: Vec::new()},
+ };
+
+ let err = nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
+
- if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
++ if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
+ assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
+ } else {
+ assert!(false);
+ }
+
+ assert!(nodes[0].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[0]);
+}
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() {
+ //BOLT 2 Requirement: until the corresponding HTLC is irrevocably committed in both sides' commitment transactions: MUST NOT send an update_fulfill_htlc, update_fail_htlc, or update_fail_malformed_htlc.
+
+ let mut nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
+
+ let update_msg = msgs::UpdateFailMalformedHTLC{
+ channel_id: chan.2,
+ htlc_id: 0,
+ sha256_of_onion: [1; 32],
+ failure_code: 0x8000,
+ };
+
+ let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
+
- if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
++ if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
+ assert_eq!(err, "Remote tried to fulfill/fail HTLC before it had been committed");
+ } else {
+ assert!(false);
+ }
+
+ assert!(nodes[0].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[0]);
+}
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_incorrect_htlc_id() {
+ //BOLT 2 Requirement: A receiving node: if the id does not correspond to an HTLC in its current commitment transaction MUST fail the channel.
+
+ let nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
+
+ nodes[1].node.claim_funds(our_payment_preimage);
+ check_added_monitors!(nodes[1], 1);
+
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert_eq!(update_fulfill_htlcs.len(), 1);
+ assert!(update_fail_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert!(update_fee.is_none());
+ update_fulfill_htlcs[0].clone()
+ },
+ _ => panic!("Unexpected event"),
+ }
+ };
+
+ update_fulfill_msg.htlc_id = 1;
+
+ let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
- if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
++ if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
+ assert_eq!(err, "Remote tried to fulfill/fail an HTLC we couldn't find");
+ } else {
+ assert!(false);
+ }
+
+ assert!(nodes[0].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[0]);
+}
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_wrong_preimage() {
+ //BOLT 2 Requirement: A receiving node: if the payment_preimage value in update_fulfill_htlc doesn't SHA256 hash to the corresponding HTLC payment_hash MUST fail the channel.
+
+ let nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let our_payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 100000).0;
+
+ nodes[1].node.claim_funds(our_payment_preimage);
+ check_added_monitors!(nodes[1], 1);
+
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let mut update_fulfill_msg: msgs::UpdateFulfillHTLC = {
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert_eq!(update_fulfill_htlcs.len(), 1);
+ assert!(update_fail_htlcs.is_empty());
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert!(update_fee.is_none());
+ update_fulfill_htlcs[0].clone()
+ },
+ _ => panic!("Unexpected event"),
+ }
+ };
+
+ update_fulfill_msg.payment_preimage = PaymentPreimage([1; 32]);
+
+ let err = nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_msg);
- if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
++ if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
+ assert_eq!(err, "Remote tried to fulfill HTLC with an incorrect preimage");
+ } else {
+ assert!(false);
+ }
+
+ assert!(nodes[0].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[0]);
+}
+
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_missing_badonion_bit_for_malformed_htlc_message() {
+ //BOLT 2 Requirement: A receiving node: if the BADONION bit in failure_code is not set for update_fail_malformed_htlc MUST fail the channel.
+
+ let mut nodes = create_network(2, &[None, None]);
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ updates.update_add_htlcs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]).unwrap();
+ check_added_monitors!(nodes[1], 0);
+ commitment_signed_dance!(nodes[1], nodes[0], updates.commitment_signed, false, true);
+
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+
+ let mut update_msg: msgs::UpdateFailMalformedHTLC = {
+ match events[0] {
+ MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert!(update_fulfill_htlcs.is_empty());
+ assert!(update_fail_htlcs.is_empty());
+ assert_eq!(update_fail_malformed_htlcs.len(), 1);
+ assert!(update_fee.is_none());
+ update_fail_malformed_htlcs[0].clone()
+ },
+ _ => panic!("Unexpected event"),
+ }
+ };
+ update_msg.failure_code &= !0x8000;
+ let err = nodes[0].node.handle_update_fail_malformed_htlc(&nodes[1].node.get_our_node_id(), &update_msg);
- if let Err(msgs::HandleError{err, action: Some(msgs::ErrorAction::SendErrorMessage {..})}) = err {
- assert_eq!(err, "Got update_fail_malformed_htlc with BADONION not set");
- } else {
- assert!(false);
- }
++ if let Err(msgs::LightningError{err, action: msgs::ErrorAction::SendErrorMessage {..}}) = err {
+ assert_eq!(err, "Got update_fail_malformed_htlc with BADONION not set");
+ } else {
+ assert!(false);
+ }
+
+ assert!(nodes[0].node.list_channels().is_empty());
+ check_closed_broadcast!(nodes[0]);
+}
+
+#[test]
+fn test_update_fulfill_htlc_bolt2_after_malformed_htlc_message_must_forward_update_fail_htlc() {
+ //BOLT 2 Requirement: a receiving node which has an outgoing HTLC canceled by update_fail_malformed_htlc:
+ // * MUST return an error in the update_fail_htlc sent to the link which originally sent the HTLC, using the failure_code given and setting the data to sha256_of_onion.
+
+ let mut nodes = create_network(3, &[None, None, None]);
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
+ create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
+
+ let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+
+ //First hop
+ let mut payment_event = {
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ check_added_monitors!(nodes[1], 0);
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ let mut events_2 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_2.len(), 1);
+ check_added_monitors!(nodes[1], 1);
+ payment_event = SendEvent::from_event(events_2.remove(0));
+ assert_eq!(payment_event.msgs.len(), 1);
+
+ //Second Hop
+ payment_event.msgs[0].onion_routing_packet.version = 1; //Produce a malformed HTLC message
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ check_added_monitors!(nodes[2], 0);
+ commitment_signed_dance!(nodes[2], nodes[1], payment_event.commitment_msg, false, true);
+
+ let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_3.len(), 1);
+ let update_msg : (msgs::UpdateFailMalformedHTLC, msgs::CommitmentSigned) = {
+ match events_3[0] {
+ MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert!(update_fulfill_htlcs.is_empty());
+ assert!(update_fail_htlcs.is_empty());
+ assert_eq!(update_fail_malformed_htlcs.len(), 1);
+ assert!(update_fee.is_none());
+ (update_fail_malformed_htlcs[0].clone(), commitment_signed.clone())
+ },
+ _ => panic!("Unexpected event"),
+ }
+ };
+
+ nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), &update_msg.0).unwrap();
+
+ check_added_monitors!(nodes[1], 0);
+ commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events_4.len(), 1);
+
+ //Confirm that handling the update_fail_malformed_htlc message produces an update_fail_htlc message to be forwarded back along the route
+ match events_4[0] {
+ MessageSendEvent::UpdateHTLCs { node_id: _ , updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, .. } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert!(update_fulfill_htlcs.is_empty());
+ assert_eq!(update_fail_htlcs.len(), 1);
+ assert!(update_fail_malformed_htlcs.is_empty());
+ assert!(update_fee.is_none());
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ check_added_monitors!(nodes[1], 1);
+}
+
+fn do_test_failure_delay_dust_htlc_local_commitment(announce_latest: bool) {
+ // Dust-HTLC failure updates must be delayed until the failure-trigger tx (in this case the local
+ // commitment tx) reaches ANTI_REORG_DELAY confirmations.
+ // We can have at most two valid local commitment txn, so both cases must be covered, and both txn
+ // must be checked to catch all HTLCs: an HTLC may have been removed from the latest local
+ // commitment tx but remain valid until we receive the remote's RAA.
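+ // Note that a dust HTLC has no output in the commitment transaction at all, so there is no
+ // claimable HTLC tx to watch; its failure is keyed directly off the confirmed commitment tx.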
+
+ let nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
+
+ // We route 2 dust-HTLCs between A and B
+ let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+ let (_, payment_hash_2) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+ route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ // Cache one local commitment tx as previous
+ let as_prev_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+
+ // Fail one HTLC to prune it from the will-be-latest local commitment tx
+ assert!(nodes[1].node.fail_htlc_backwards(&payment_hash_2));
+ check_added_monitors!(nodes[1], 0);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+
+ let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &remove.update_fail_htlcs[0]).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &remove.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ // Cache one local commitment tx as latest
+ let as_last_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ match events[0] {
+ MessageSendEvent::SendRevokeAndACK { node_id, .. } => {
+ assert_eq!(node_id, nodes[1].node.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ MessageSendEvent::UpdateHTLCs { node_id, .. } => {
+ assert_eq!(node_id, nodes[1].node.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ assert_ne!(as_prev_commitment_tx, as_last_commitment_tx);
+ // Fail the 2 dust-HTLCs, moving their failure into the maturation buffer (htlc_updated_waiting_threshold_conf)
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ if announce_latest {
+ nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&as_last_commitment_tx[0]], &[1; 1]);
+ } else {
+ nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&as_prev_commitment_tx[0]], &[1; 1]);
+ }
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+
+ assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+ connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 1, true, header.bitcoin_hash());
+ let events = nodes[0].node.get_and_clear_pending_events();
+ // Only 2 PaymentFailed events should show up; the over-dust HTLC has to be failed by a timeout tx
+ assert_eq!(events.len(), 2);
+ let mut first_failed = false;
+ for event in events {
+ match event {
+ Event::PaymentFailed { payment_hash, .. } => {
+ if payment_hash == payment_hash_1 {
+ assert!(!first_failed);
+ first_failed = true;
+ } else {
+ assert_eq!(payment_hash, payment_hash_2);
+ }
+ }
+ _ => panic!("Unexpected event"),
+ }
+ }
+}
+
+#[test]
+fn test_failure_delay_dust_htlc_local_commitment() {
+ do_test_failure_delay_dust_htlc_local_commitment(true);
+ do_test_failure_delay_dust_htlc_local_commitment(false);
+}
+
+#[test]
+fn test_no_failure_dust_htlc_local_commitment() {
+ // Transaction filters for failing back dust HTLCs based on local commitment txn info have been
+ // prone to error; we test here that a dummy transaction doesn't fail them.
+
+ let nodes = create_network(2, &[None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ // Rebalance a bit
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+
+ let as_dust_limit = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
+ let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
+
+ // We route 2 dust-HTLCs between A and B
+ let (preimage_1, _) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+ let (preimage_2, _) = route_payment(&nodes[1], &[&nodes[0]], as_dust_limit*1000);
+
+ // Build a dummy invalid transaction spending the funding output, as a commitment tx would
+ let input = TxIn {
+ previous_output: BitcoinOutPoint { txid: chan.3.txid(), vout: 0 },
+ script_sig: Script::new(),
+ sequence: 0,
+ witness: Vec::new(),
+ };
+
+ let outp = TxOut {
+ script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(),
+ value: 10000,
+ };
+
+ let dummy_tx = Transaction {
+ version: 2,
+ lock_time: 0,
+ input: vec![input],
+ output: vec![outp]
+ };
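+ // The empty witness makes this spend invalid on-chain; it only exists to check that a spend of
+ // the funding outpoint which is not a known commitment tx does not fail our dust HTLCs.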
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].chan_monitor.simple_monitor.block_connected(&header, 1, &[&dummy_tx], &[1;1]);
+ assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+ assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 0);
+ // We connect a few more blocks to check everything is all right
+ connect_blocks(&nodes[0].chain_monitor, 20, 1, true, header.bitcoin_hash());
+ assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+ assert_eq!(nodes[0].node.get_and_clear_pending_msg_events().len(), 0);
+
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], preimage_1);
+ claim_payment(&nodes[1], &vec!(&nodes[0])[..], preimage_2);
+}
+
+fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
+ // Outbound HTLC-failure updates must be cancelled if we get a reorg before we reach ANTI_REORG_DELAY.
+ // Broadcast of revoked remote commitment tx, trigger failure-update of dust/non-dust HTLCs
+ // Broadcast of remote commitment tx, trigger failure-update of dust-HTLCs
+ // Broadcast of timeout tx on remote commitment tx, trigger failure-update of non-dust HTLCs
+ // Broadcast of local commitment tx, trigger failure-update of dust-HTLCs
+ // Broadcast of HTLC-timeout tx on local commitment tx, trigger failure-update of non-dust HTLCs
+
+ let nodes = create_network(3, &[None, None, None]);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1, LocalFeatures::new(), LocalFeatures::new());
+
+ let bs_dust_limit = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
+
+ let (_payment_preimage_1, dust_hash) = route_payment(&nodes[0], &[&nodes[1]], bs_dust_limit*1000);
+ let (_payment_preimage_2, non_dust_hash) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+
+ let as_commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+ let bs_commitment_tx = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+
+ // We revoked bs_commitment_tx
+ if revoked {
+ let (payment_preimage_3, _) = route_payment(&nodes[0], &[&nodes[1]], 1000000);
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
+ }
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ let mut timeout_tx = Vec::new();
+ if local {
+ // We fail dust-HTLC 1 by broadcast of local commitment tx
+ nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&as_commitment_tx[0]], &[1; 1]);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+ timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
+ let parent_hash = connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ assert_eq!(payment_hash, dust_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
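+ // The last witness element is the HTLC script itself; its length confirms the broadcast tx is
+ // our HTLC-timeout spending an offered-HTLC output.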
+ // We fail non-dust-HTLC 2 by broadcast of local HTLC-timeout tx on local commitment tx
+ let header_2 = BlockHeader { version: 0x20000000, prev_blockhash: parent_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+ nodes[0].chain_monitor.block_connected_checked(&header_2, 7, &[&timeout_tx[0]], &[1; 1]);
+ let header_3 = BlockHeader { version: 0x20000000, prev_blockhash: header_2.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 8, true, header_3.bitcoin_hash());
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ assert_eq!(payment_hash, non_dust_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ } else {
+ // We fail dust-HTLC 1 by broadcast of remote commitment tx. If revoked, fail also non-dust HTLC
+ nodes[0].chain_monitor.block_connected_checked(&header, 1, &[&bs_commitment_tx[0]], &[1; 1]);
+ assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[0].clone());
+ let parent_hash = connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 2, true, header.bitcoin_hash());
+ let header_2 = BlockHeader { version: 0x20000000, prev_blockhash: parent_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ if !revoked {
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ assert_eq!(payment_hash, dust_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ // We fail non-dust-HTLC 2 by broadcast of local timeout tx on remote commitment tx
+ nodes[0].chain_monitor.block_connected_checked(&header_2, 7, &[&timeout_tx[0]], &[1; 1]);
+ assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+ let header_3 = BlockHeader { version: 0x20000000, prev_blockhash: header_2.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ connect_blocks(&nodes[0].chain_monitor, ANTI_REORG_DELAY - 1, 8, true, header_3.bitcoin_hash());
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ assert_eq!(payment_hash, non_dust_hash);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ } else {
+ // If revoked, both dust & non-dust HTLCs should have been failed after ANTI_REORG_DELAY confs of revoked
+ // commitment tx
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ let first;
+ match events[0] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ if payment_hash == dust_hash { first = true; }
+ else { first = false; }
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ Event::PaymentFailed { payment_hash, .. } => {
+ if first { assert_eq!(payment_hash, non_dust_hash); }
+ else { assert_eq!(payment_hash, dust_hash); }
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+ }
+}
+
+#[test]
+fn test_sweep_outbound_htlc_failure_update() {
+ do_test_sweep_outbound_htlc_failure_update(false, true);
+ do_test_sweep_outbound_htlc_failure_update(false, false);
+ do_test_sweep_outbound_htlc_failure_update(true, false);
+}
+
+#[test]
+fn test_upfront_shutdown_script() {
+ // BOLT 2: option_upfront_shutdown_script - if a peer commits to its closing script at channel
+ // opening, we enforce it when the shutdown message arrives
+
+ let mut config = UserConfig::new();
+ config.channel_options.announced_channel = true;
+ config.peer_channel_config_limits.force_announced_channel_preference = false;
+ config.channel_options.commit_upfront_shutdown_pubkey = false;
+ let nodes = create_network(3, &[None, Some(config), None]);
+
+ // We test that in case of peer committing upfront to a script, if it changes at closing, we refuse to sign
+ let flags = LocalFeatures::new();
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
+ nodes[0].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
+ let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
+ node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
+ // Test that we enforce the upfront scriptpubkey: providing a different one at closing gets the peer disconnected
+ if let Err(error) = nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown) {
- if let Some(error) = error.action {
- match error {
- ErrorAction::SendErrorMessage { msg } => {
- assert_eq!(msg.data,"They wanted our payments to be delayed by a needlessly long period");
- },
- _ => { assert!(false); }
- }
- } else { assert!(false); }
++ match error.action {
++ ErrorAction::SendErrorMessage { msg } => {
++ assert_eq!(msg.data,"Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey");
++ },
++ _ => { assert!(false); }
++ }
+ } else { assert!(false); }
+ let events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+
+ // We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1000000, 1000000, flags.clone(), flags.clone());
+ nodes[0].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
+ let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
+ if let Ok(_) = nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown) {}
+ else { assert!(false) }
+ let events = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+
+ // We test that in case of a peer not signaling upfront_shutdown_script, we don't enforce any script committed at channel opening
+ let mut flags_no = LocalFeatures::new();
+ flags_no.unset_upfront_shutdown_script();
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags_no, flags.clone());
+ nodes[0].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
+ let mut node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
+ node_1_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
+ if let Ok(_) = nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown) {}
+ else { assert!(false) }
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+
+ // We test that if the user opts out, we provide a zero-length script at channel opening and are able to
+ // close the channel smoothly; the opt-out is from the channel initiator here
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 0, 1000000, 1000000, flags.clone(), flags.clone());
+ nodes[1].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
+ let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
+ if let Ok(_) = nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown) {}
+ else { assert!(false) }
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+
+ // We test that if the user opts out, we provide a zero-length script at channel opening and are able to
+ // close the channel smoothly; this time the opt-out is from the channel non-initiator
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, flags.clone(), flags.clone());
+ nodes[1].node.close_channel(&OutPoint::new(chan.3.txid(), 0).to_channel_id()).unwrap();
+ let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
+ if let Ok(_) = nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown) {}
+ else { assert!(false) }
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+}
+
+#[test]
+fn test_user_configurable_csv_delay() {
+ // We test that our channel constructors yield errors when we pass them an absurd CSV delay
+
+ let mut low_our_to_self_config = UserConfig::new();
+ low_our_to_self_config.own_channel_config.our_to_self_delay = 6;
+ let mut high_their_to_self_config = UserConfig::new();
+ high_their_to_self_config.peer_channel_config_limits.their_to_self_delay = 100;
+ let nodes = create_network(2, &[Some(high_their_to_self_config.clone()), None]);
+
+ // We test that config.own_channel_config.our_to_self_delay >= BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
+ let keys_manager: Arc<KeysInterface> = Arc::new(KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::new(test_utils::TestLogger::new()), 10, 20));
+ if let Err(error) = Channel::new_outbound(&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, Arc::new(test_utils::TestLogger::new()), &low_our_to_self_config) {
+ match error {
+ APIError::APIMisuseError { err } => { assert_eq!(err, "Configured with an unreasonable our_to_self_delay putting user funds at risks"); },
+ _ => panic!("Unexpected event"),
+ }
+ } else { assert!(false) }
+
+ // We test that config.own_channel_config.our_to_self_delay >= BREAKDOWN_TIMEOUT is enforced in Channel::new_from_req()
+ nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42).unwrap();
+ let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
+ open_channel.to_self_delay = 200;
+ if let Err(error) = Channel::new_from_req(&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), LocalFeatures::new(), &open_channel, 0, Arc::new(test_utils::TestLogger::new()), &low_our_to_self_config) {
+ match error {
+ ChannelError::Close(err) => { assert_eq!(err, "Configured with an unreasonable our_to_self_delay putting user funds at risks"); },
+ _ => panic!("Unexpected event"),
+ }
+ } else { assert!(false); }
+
+ // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::accept_channel()
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1000000, 1000000, 42).unwrap();
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), LocalFeatures::new(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())).unwrap();
+ let mut accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
+ accept_channel.to_self_delay = 200;
+ if let Err(error) = nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), LocalFeatures::new(), &accept_channel) {
- if let Some(error) = err.action {
- match error {
- ErrorAction::SendErrorMessage { msg } => {
- assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
- },
- _ => panic!("Unexpected event!"),
- }
- } else { assert!(false); }
++ match error.action {
++ ErrorAction::SendErrorMessage { msg } => {
++ assert_eq!(msg.data,"They wanted our payments to be delayed by a needlessly long period");
++ },
++ _ => { assert!(false); }
++ }
+ } else { assert!(false); }
+
+ // We test msg.to_self_delay <= config.their_to_self_delay is enforced in Channel::new_from_req()
+ nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42).unwrap();
+ let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
+ open_channel.to_self_delay = 200;
+ if let Err(error) = Channel::new_from_req(&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), LocalFeatures::new(), &open_channel, 0, Arc::new(test_utils::TestLogger::new()), &high_their_to_self_config) {
+ match error {
+ ChannelError::Close(err) => { assert_eq!(err, "They wanted our payments to be delayed by a needlessly long period"); },
+ _ => panic!("Unexpected event"),
+ }
+ } else { assert!(false); }
+}
+
+#[test]
+fn test_data_loss_protect() {
+ // We want to be sure that:
+ // * we don't broadcast our Local Commitment Tx in case we have fallen behind
+ // * we close the channel upon detecting that the other side has fallen behind
+ // * we are able to claim our own outputs thanks to the remote's my_current_per_commitment_point
+ let mut nodes = create_network(2, &[None, None]);
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, LocalFeatures::new(), LocalFeatures::new());
+
+ // Cache node A state before any channel update
+ let previous_node_state = nodes[0].node.encode();
+ let mut previous_chan_monitor_state = test_utils::TestVecWriter(Vec::new());
+ nodes[0].chan_monitor.simple_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write_for_disk(&mut previous_chan_monitor_state).unwrap();
+
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ // Restore node A from previous state
+ let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::with_id(format!("node {}", 0)));
+ let chan_monitor = <(Sha256dHash, ChannelMonitor)>::read(&mut ::std::io::Cursor::new(previous_chan_monitor_state.0), Arc::clone(&logger)).unwrap().1;
+ let chain_monitor = Arc::new(ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
+ let tx_broadcaster = Arc::new(test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())});
+ let feeest = Arc::new(test_utils::TestFeeEstimator { sat_per_kw: 253 });
+ let monitor = Arc::new(test_utils::TestChannelMonitor::new(chain_monitor.clone(), tx_broadcaster.clone(), logger.clone(), feeest.clone()));
+ let mut channel_monitors = HashMap::new();
+ channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &chan_monitor);
+ let node_state_0 = <(Sha256dHash, ChannelManager)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
+ keys_manager: Arc::new(keysinterface::KeysManager::new(&nodes[0].node_seed, Network::Testnet, Arc::clone(&logger), 42, 21)),
+ fee_estimator: feeest.clone(),
+ monitor: monitor.clone(),
+ chain_monitor: chain_monitor.clone(),
+ logger: Arc::clone(&logger),
+ tx_broadcaster,
+ default_config: UserConfig::new(),
+ channel_monitors: &channel_monitors
+ }).unwrap().1;
+ nodes[0].node = Arc::new(node_state_0);
+ assert!(monitor.add_update_monitor(OutPoint { txid: chan.3.txid(), index: 0 }, chan_monitor.clone()).is_ok());
+ nodes[0].chan_monitor = monitor;
+ nodes[0].chain_monitor = chain_monitor;
+ check_added_monitors!(nodes[0], 1);
+
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+
+ let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+
+ // Check that we update the monitor following learning of the per_commitment_point from B
+ if let Err(err) = nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]) {
- if let Some(error) = err.action {
- match error {
- ErrorAction::SendErrorMessage { msg } => {
- assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); },
- _ => panic!("Unexpected event!"),
- }
- } else { assert!(false); }
++ match err.action {
++ ErrorAction::SendErrorMessage { msg } => {
++ assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting");
++ },
++ _ => panic!("Unexpected event!"),
++ }
+ } else { assert!(false); }
+ check_added_monitors!(nodes[0], 1);
+
+ {
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+ assert_eq!(node_txn.len(), 0);
+ }
+
+ let mut reestablish_1 = Vec::with_capacity(1);
+ for msg in nodes[0].node.get_and_clear_pending_msg_events() {
+ if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ reestablish_1.push(msg.clone());
+ } else if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg {
+ } else {
+ panic!("Unexpected event")
+ }
+ }
+
+ // Check that we close the channel upon detecting that A has fallen behind
+ if let Err(err) = nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]) {
++ match err.action {
++ ErrorAction::SendErrorMessage { msg } => {
++ assert_eq!(msg.data, "Peer attempted to reestablish channel with a very old local commitment transaction"); },
++ _ => panic!("Unexpected event!"),
++ }
+ } else { assert!(false); }
+
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+
+ // Check A is able to claim to_remote output
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone();
+ assert_eq!(node_txn.len(), 1);
+ check_spends!(node_txn[0], chan.3.clone());
+ assert_eq!(node_txn[0].output.len(), 2);
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()]}, 1);
+ let spend_txn = check_spendable_outputs!(nodes[0], 1);
+ assert_eq!(spend_txn.len(), 1);
+ check_spends!(spend_txn[0], node_txn[0].clone());
+}
--- /dev/null
- /// Used to put an error message in a HandleError
+//! Wire messages, traits representing wire message handlers, and a few error types live here.
+//!
+//! For a normal node you probably don't need to use anything here, however, if you wish to split a
+//! node into an internet-facing route/message socket handling daemon and a separate daemon (or
+//! server entirely) which handles only channel-related messages you may wish to implement
+//! ChannelMessageHandler yourself and use it to re-serialize messages and pass them across
+//! daemons/servers.
+//!
+//! Note that if you go with such an architecture (instead of passing raw socket events to a
+//! non-internet-facing system) you trust the frontend internet-facing system to not lie about the
+//! source node_id of the message, however this does allow you to significantly reduce bandwidth
+//! between the systems as routing messages can represent a significant chunk of bandwidth usage
+//! (especially for non-channel-publicly-announcing nodes). As an alternate design which avoids
+//! this issue, if you have sufficient bidirectional bandwidth between your systems, you may send
+//! raw socket events into your non-internet-facing system and then send routing events back to
+//! track the network on the less-secure system.
+
+use secp256k1::key::PublicKey;
+use secp256k1::Signature;
+use secp256k1;
+use bitcoin_hashes::sha256d::Hash as Sha256dHash;
+use bitcoin::blockdata::script::Script;
+
+use std::error::Error;
+use std::{cmp, fmt};
+use std::io::Read;
+use std::result::Result;
+
+use util::events;
+use util::ser::{Readable, Writeable, Writer};
+
+use ln::channelmanager::{PaymentPreimage, PaymentHash};
+
+/// An error in decoding a message or struct.
+#[derive(Debug)]
+pub enum DecodeError {
+ /// A version byte specified something we don't know how to handle.
+ /// Includes unknown realm byte in an OnionHopData packet
+ UnknownVersion,
+ /// Unknown feature mandating we fail to parse message
+ UnknownRequiredFeature,
+ /// Value was invalid, eg a byte which was supposed to be a bool was something other than a 0
+ /// or 1, a public key/private key/signature was invalid, text wasn't UTF-8, etc
+ InvalidValue,
+ /// Buffer too short
+ ShortRead,
+ /// node_announcement included more than one address of a given type!
+ ExtraAddressesPerType,
+ /// A length descriptor in the packet didn't describe the later data correctly
+ BadLengthDescriptor,
+ /// Error from std::io
+ Io(::std::io::Error),
+}
+
+/// Tracks localfeatures which are only in init messages
+#[derive(Clone, PartialEq)]
+pub struct LocalFeatures {
+ flags: Vec<u8>,
+}
+
+impl LocalFeatures {
+ /// Create a LocalFeatures with the default set of flags (visibility extended for fuzz tests)
+ #[cfg(not(feature = "fuzztarget"))]
+ pub(crate) fn new() -> LocalFeatures {
+ LocalFeatures {
+ flags: vec![2 | 1 << 5],
+ }
+ }
+ #[cfg(feature = "fuzztarget")]
+ pub fn new() -> LocalFeatures {
+ LocalFeatures {
+ flags: vec![2 | 1 << 5],
+ }
+ }
+
+ pub(crate) fn supports_data_loss_protect(&self) -> bool {
+ self.flags.len() > 0 && (self.flags[0] & 3) != 0
+ }
+ pub(crate) fn initial_routing_sync(&self) -> bool {
+ self.flags.len() > 0 && (self.flags[0] & (1 << 3)) != 0
+ }
+ pub(crate) fn set_initial_routing_sync(&mut self) {
+ if self.flags.len() == 0 {
+ self.flags.resize(1, 1 << 3);
+ } else {
+ self.flags[0] |= 1 << 3;
+ }
+ }
+
+ pub(crate) fn supports_upfront_shutdown_script(&self) -> bool {
+ self.flags.len() > 0 && (self.flags[0] & (3 << 4)) != 0
+ }
+ #[cfg(test)]
+ pub(crate) fn unset_upfront_shutdown_script(&mut self) {
+ self.flags[0] ^= 1 << 5;
+ }
+
+ pub(crate) fn requires_unknown_bits(&self) -> bool {
+ self.flags.iter().enumerate().any(|(idx, &byte)| {
+ ( idx != 0 && (byte & 0x55) != 0 ) || ( idx == 0 && (byte & 0x14) != 0 )
+ })
+ }
+
+ pub(crate) fn supports_unknown_bits(&self) -> bool {
+ self.flags.iter().enumerate().any(|(idx, &byte)| {
+ ( idx != 0 && byte != 0 ) || ( idx == 0 && (byte & 0xc4) != 0 )
+ })
+ }
+}
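+
+// Editor's sketch (hypothetical test, not part of the original patch): the default
+// flag byte above, 2 | 1 << 5, sets bit 1 (option_data_loss_protect, optional) and
+// bit 5 (option_upfront_shutdown_script, optional), so both accessors report support
+// while no unknown mandatory bits are set.
+#[cfg(test)]
+mod local_features_flag_sketch {
+ use super::LocalFeatures;
+ #[test]
+ fn default_flags_semantics() {
+   let feats = LocalFeatures::new();
+   assert!(feats.supports_data_loss_protect());
+   assert!(feats.supports_upfront_shutdown_script());
+   assert!(!feats.initial_routing_sync());
+   assert!(!feats.requires_unknown_bits());
+ }
+}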
+
+/// Tracks globalfeatures which are in init messages and routing announcements
+#[derive(Clone, PartialEq, Debug)]
+pub struct GlobalFeatures {
+ #[cfg(not(test))]
+ flags: Vec<u8>,
+ // Used to test encoding of diverse msgs
+ #[cfg(test)]
+ pub flags: Vec<u8>
+}
+
+impl GlobalFeatures {
+ pub(crate) fn new() -> GlobalFeatures {
+ GlobalFeatures {
+ flags: Vec::new(),
+ }
+ }
+
+ pub(crate) fn requires_unknown_bits(&self) -> bool {
+ for &byte in self.flags.iter() {
+ if (byte & 0x55) != 0 {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ pub(crate) fn supports_unknown_bits(&self) -> bool {
+ for &byte in self.flags.iter() {
+ if byte != 0 {
+ return true;
+ }
+ }
+ return false;
+ }
+}
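+
+// Editor's sketch (hypothetical test, not part of the original patch): per BOLT 9's
+// "it's ok to be odd" rule, even feature bits are mandatory and odd bits optional;
+// 0x55 == 0b0101_0101 masks exactly the even positions, which is why an unknown even
+// bit trips requires_unknown_bits() while an unknown odd bit does not.
+#[cfg(test)]
+mod global_features_flag_sketch {
+ use super::GlobalFeatures;
+ #[test]
+ fn even_bits_are_required() {
+   let mut feats = GlobalFeatures::new();
+   feats.flags = vec![1 << 1]; // unknown but optional (odd bit)
+   assert!(feats.supports_unknown_bits());
+   assert!(!feats.requires_unknown_bits());
+   feats.flags = vec![1 << 2]; // unknown and mandatory (even bit)
+   assert!(feats.requires_unknown_bits());
+ }
+}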
+
+/// An init message to be sent or received from a peer
+pub struct Init {
+ pub(crate) global_features: GlobalFeatures,
+ pub(crate) local_features: LocalFeatures,
+}
+
+/// An error message to be sent or received from a peer
+#[derive(Clone)]
+pub struct ErrorMessage {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) data: String,
+}
+
+/// A ping message to be sent or received from a peer
+pub struct Ping {
+ pub(crate) ponglen: u16,
+ pub(crate) byteslen: u16,
+}
+
+/// A pong message to be sent or received from a peer
+pub struct Pong {
+ pub(crate) byteslen: u16,
+}
+
+/// An open_channel message to be sent or received from a peer
+#[derive(Clone)]
+pub struct OpenChannel {
+ pub(crate) chain_hash: Sha256dHash,
+ pub(crate) temporary_channel_id: [u8; 32],
+ pub(crate) funding_satoshis: u64,
+ pub(crate) push_msat: u64,
+ pub(crate) dust_limit_satoshis: u64,
+ pub(crate) max_htlc_value_in_flight_msat: u64,
+ pub(crate) channel_reserve_satoshis: u64,
+ pub(crate) htlc_minimum_msat: u64,
+ pub(crate) feerate_per_kw: u32,
+ pub(crate) to_self_delay: u16,
+ pub(crate) max_accepted_htlcs: u16,
+ pub(crate) funding_pubkey: PublicKey,
+ pub(crate) revocation_basepoint: PublicKey,
+ pub(crate) payment_basepoint: PublicKey,
+ pub(crate) delayed_payment_basepoint: PublicKey,
+ pub(crate) htlc_basepoint: PublicKey,
+ pub(crate) first_per_commitment_point: PublicKey,
+ pub(crate) channel_flags: u8,
+ pub(crate) shutdown_scriptpubkey: OptionalField<Script>,
+}
+
+/// An accept_channel message to be sent or received from a peer
+#[derive(Clone)]
+pub struct AcceptChannel {
+ pub(crate) temporary_channel_id: [u8; 32],
+ pub(crate) dust_limit_satoshis: u64,
+ pub(crate) max_htlc_value_in_flight_msat: u64,
+ pub(crate) channel_reserve_satoshis: u64,
+ pub(crate) htlc_minimum_msat: u64,
+ pub(crate) minimum_depth: u32,
+ pub(crate) to_self_delay: u16,
+ pub(crate) max_accepted_htlcs: u16,
+ pub(crate) funding_pubkey: PublicKey,
+ pub(crate) revocation_basepoint: PublicKey,
+ pub(crate) payment_basepoint: PublicKey,
+ pub(crate) delayed_payment_basepoint: PublicKey,
+ pub(crate) htlc_basepoint: PublicKey,
+ pub(crate) first_per_commitment_point: PublicKey,
+ pub(crate) shutdown_scriptpubkey: OptionalField<Script>
+}
+
+/// A funding_created message to be sent or received from a peer
+#[derive(Clone)]
+pub struct FundingCreated {
+ pub(crate) temporary_channel_id: [u8; 32],
+ pub(crate) funding_txid: Sha256dHash,
+ pub(crate) funding_output_index: u16,
+ pub(crate) signature: Signature,
+}
+
+/// A funding_signed message to be sent or received from a peer
+#[derive(Clone)]
+pub struct FundingSigned {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) signature: Signature,
+}
+
+/// A funding_locked message to be sent or received from a peer
+#[derive(Clone, PartialEq)]
+pub struct FundingLocked {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) next_per_commitment_point: PublicKey,
+}
+
+/// A shutdown message to be sent or received from a peer
+#[derive(Clone, PartialEq)]
+pub struct Shutdown {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) scriptpubkey: Script,
+}
+
+/// A closing_signed message to be sent or received from a peer
+#[derive(Clone, PartialEq)]
+pub struct ClosingSigned {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) fee_satoshis: u64,
+ pub(crate) signature: Signature,
+}
+
+/// An update_add_htlc message to be sent or received from a peer
+#[derive(Clone, PartialEq)]
+pub struct UpdateAddHTLC {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) htlc_id: u64,
+ pub(crate) amount_msat: u64,
+ pub(crate) payment_hash: PaymentHash,
+ pub(crate) cltv_expiry: u32,
+ pub(crate) onion_routing_packet: OnionPacket,
+}
+
+/// An update_fulfill_htlc message to be sent or received from a peer
+#[derive(Clone, PartialEq)]
+pub struct UpdateFulfillHTLC {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) htlc_id: u64,
+ pub(crate) payment_preimage: PaymentPreimage,
+}
+
+/// An update_fail_htlc message to be sent or received from a peer
+#[derive(Clone, PartialEq)]
+pub struct UpdateFailHTLC {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) htlc_id: u64,
+ pub(crate) reason: OnionErrorPacket,
+}
+
+/// An update_fail_malformed_htlc message to be sent or received from a peer
+#[derive(Clone, PartialEq)]
+pub struct UpdateFailMalformedHTLC {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) htlc_id: u64,
+ pub(crate) sha256_of_onion: [u8; 32],
+ pub(crate) failure_code: u16,
+}
+
+/// A commitment_signed message to be sent or received from a peer
+#[derive(Clone, PartialEq)]
+pub struct CommitmentSigned {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) signature: Signature,
+ pub(crate) htlc_signatures: Vec<Signature>,
+}
+
+/// A revoke_and_ack message to be sent or received from a peer
+#[derive(Clone, PartialEq)]
+pub struct RevokeAndACK {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) per_commitment_secret: [u8; 32],
+ pub(crate) next_per_commitment_point: PublicKey,
+}
+
+/// An update_fee message to be sent or received from a peer
+#[derive(PartialEq, Clone)]
+pub struct UpdateFee {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) feerate_per_kw: u32,
+}
+
+#[derive(PartialEq, Clone)]
+pub(crate) struct DataLossProtect {
+ pub(crate) your_last_per_commitment_secret: [u8; 32],
+ pub(crate) my_current_per_commitment_point: PublicKey,
+}
+
+/// A channel_reestablish message to be sent or received from a peer
+#[derive(PartialEq, Clone)]
+pub struct ChannelReestablish {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) next_local_commitment_number: u64,
+ pub(crate) next_remote_commitment_number: u64,
+ pub(crate) data_loss_protect: OptionalField<DataLossProtect>,
+}
+
+/// An announcement_signatures message to be sent or received from a peer
+#[derive(PartialEq, Clone, Debug)]
+pub struct AnnouncementSignatures {
+ pub(crate) channel_id: [u8; 32],
+ pub(crate) short_channel_id: u64,
+ pub(crate) node_signature: Signature,
+ pub(crate) bitcoin_signature: Signature,
+}
+
+/// An address which can be used to connect to a remote peer
+#[derive(Clone, PartialEq, Debug)]
+pub enum NetAddress {
+ /// An IPv4 address/port on which the peer is listening.
+ IPv4 {
+ /// The 4-byte IPv4 address
+ addr: [u8; 4],
+ /// The port on which the node is listening
+ port: u16,
+ },
+ /// An IPv6 address/port on which the peer is listening.
+ IPv6 {
+ /// The 16-byte IPv6 address
+ addr: [u8; 16],
+ /// The port on which the node is listening
+ port: u16,
+ },
+ /// An old-style Tor onion address/port on which the peer is listening.
+ OnionV2 {
+ /// The bytes (usually encoded in base32 with ".onion" appended)
+ addr: [u8; 10],
+ /// The port on which the node is listening
+ port: u16,
+ },
+ /// A new-style Tor onion address/port on which the peer is listening.
+ /// To create the human-readable "hostname", concatenate ed25519_pubkey, checksum, and version,
+ /// wrap as base32 and append ".onion".
+ OnionV3 {
+ /// The ed25519 long-term public key of the peer
+ ed25519_pubkey: [u8; 32],
+ /// The checksum of the pubkey and version, as included in the onion address
+ checksum: u16,
+ /// The version byte, as defined by the Tor Onion v3 spec.
+ version: u8,
+ /// The port on which the node is listening
+ port: u16,
+ },
+}
+impl NetAddress {
+ fn get_id(&self) -> u8 {
+ match self {
+ &NetAddress::IPv4 {..} => { 1 },
+ &NetAddress::IPv6 {..} => { 2 },
+ &NetAddress::OnionV2 {..} => { 3 },
+ &NetAddress::OnionV3 {..} => { 4 },
+ }
+ }
+
+ /// Strict byte-length of address descriptor, 1-byte type not recorded
+ fn len(&self) -> u16 {
+ match self {
+ &NetAddress::IPv4 { .. } => { 6 },
+ &NetAddress::IPv6 { .. } => { 18 },
+ &NetAddress::OnionV2 { .. } => { 12 },
+ &NetAddress::OnionV3 { .. } => { 37 },
+ }
+ }
+}
+
+impl Writeable for NetAddress {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ match self {
+ &NetAddress::IPv4 { ref addr, ref port } => {
+ 1u8.write(writer)?;
+ addr.write(writer)?;
+ port.write(writer)?;
+ },
+ &NetAddress::IPv6 { ref addr, ref port } => {
+ 2u8.write(writer)?;
+ addr.write(writer)?;
+ port.write(writer)?;
+ },
+ &NetAddress::OnionV2 { ref addr, ref port } => {
+ 3u8.write(writer)?;
+ addr.write(writer)?;
+ port.write(writer)?;
+ },
+ &NetAddress::OnionV3 { ref ed25519_pubkey, ref checksum, ref version, ref port } => {
+ 4u8.write(writer)?;
+ ed25519_pubkey.write(writer)?;
+ checksum.write(writer)?;
+ version.write(writer)?;
+ port.write(writer)?;
+ }
+ }
+ Ok(())
+ }
+}
+
+impl<R: ::std::io::Read> Readable<R> for Result<NetAddress, u8> {
+ fn read(reader: &mut R) -> Result<Result<NetAddress, u8>, DecodeError> {
+ let byte = <u8 as Readable<R>>::read(reader)?;
+ match byte {
+ 1 => {
+ Ok(Ok(NetAddress::IPv4 {
+ addr: Readable::read(reader)?,
+ port: Readable::read(reader)?,
+ }))
+ },
+ 2 => {
+ Ok(Ok(NetAddress::IPv6 {
+ addr: Readable::read(reader)?,
+ port: Readable::read(reader)?,
+ }))
+ },
+ 3 => {
+ Ok(Ok(NetAddress::OnionV2 {
+ addr: Readable::read(reader)?,
+ port: Readable::read(reader)?,
+ }))
+ },
+ 4 => {
+ Ok(Ok(NetAddress::OnionV3 {
+ ed25519_pubkey: Readable::read(reader)?,
+ checksum: Readable::read(reader)?,
+ version: Readable::read(reader)?,
+ port: Readable::read(reader)?,
+ }))
+ },
+ _ => return Ok(Err(byte)),
+ }
+ }
+}
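+
+// Editor's sketch (hypothetical helper, not part of the original patch): the nested
+// Result above separates hard decode failures (outer Err) from an unknown address
+// descriptor byte (inner Err). Since an unknown descriptor's length is unknowable, a
+// caller typically stops parsing typed addresses at that point rather than failing.
+#[cfg(test)]
+fn read_known_addresses_sketch<R: ::std::io::Read>(reader: &mut R) -> Result<Vec<NetAddress>, DecodeError> {
+ let mut addresses = Vec::new();
+ loop {
+   match <Result<NetAddress, u8> as Readable<R>>::read(reader) {
+     Ok(Ok(addr)) => addresses.push(addr),
+     Ok(Err(_unknown_descriptor)) => break, // can't know its length; stop here
+     Err(DecodeError::ShortRead) => break, // out of bytes
+     Err(e) => return Err(e),
+   }
+ }
+ Ok(addresses)
+}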
+
+// Only exposed as broadcast of node_announcement should be filtered by node_id
+/// The unsigned part of a node_announcement
+#[derive(PartialEq, Clone, Debug)]
+pub struct UnsignedNodeAnnouncement {
+ pub(crate) features: GlobalFeatures,
+ pub(crate) timestamp: u32,
+ /// The node_id this announcement originated from (don't rebroadcast the node_announcement back
+ /// to this node).
+ pub node_id: PublicKey,
+ pub(crate) rgb: [u8; 3],
+ pub(crate) alias: [u8; 32],
+ /// List of addresses on which this node is reachable. Note that you may only have up to one
+ /// address of each type, if you have more, they may be silently discarded or we may panic!
+ pub(crate) addresses: Vec<NetAddress>,
+ pub(crate) excess_address_data: Vec<u8>,
+ pub(crate) excess_data: Vec<u8>,
+}
+#[derive(PartialEq, Clone)]
+/// A node_announcement message to be sent or received from a peer
+pub struct NodeAnnouncement {
+ pub(crate) signature: Signature,
+ pub(crate) contents: UnsignedNodeAnnouncement,
+}
+
+// Only exposed as broadcast of channel_announcement should be filtered by node_id
+/// The unsigned part of a channel_announcement
+#[derive(PartialEq, Clone, Debug)]
+pub struct UnsignedChannelAnnouncement {
+ pub(crate) features: GlobalFeatures,
+ pub(crate) chain_hash: Sha256dHash,
+ pub(crate) short_channel_id: u64,
+ /// One of the two node_ids which are endpoints of this channel
+ pub node_id_1: PublicKey,
+ /// The other of the two node_ids which are endpoints of this channel
+ pub node_id_2: PublicKey,
+ pub(crate) bitcoin_key_1: PublicKey,
+ pub(crate) bitcoin_key_2: PublicKey,
+ pub(crate) excess_data: Vec<u8>,
+}
+/// A channel_announcement message to be sent or received from a peer
+#[derive(PartialEq, Clone, Debug)]
+pub struct ChannelAnnouncement {
+ pub(crate) node_signature_1: Signature,
+ pub(crate) node_signature_2: Signature,
+ pub(crate) bitcoin_signature_1: Signature,
+ pub(crate) bitcoin_signature_2: Signature,
+ pub(crate) contents: UnsignedChannelAnnouncement,
+}
+
+#[derive(PartialEq, Clone, Debug)]
+pub(crate) struct UnsignedChannelUpdate {
+ pub(crate) chain_hash: Sha256dHash,
+ pub(crate) short_channel_id: u64,
+ pub(crate) timestamp: u32,
+ pub(crate) flags: u16,
+ pub(crate) cltv_expiry_delta: u16,
+ pub(crate) htlc_minimum_msat: u64,
+ pub(crate) fee_base_msat: u32,
+ pub(crate) fee_proportional_millionths: u32,
+ pub(crate) excess_data: Vec<u8>,
+}
+/// A channel_update message to be sent or received from a peer
+#[derive(PartialEq, Clone, Debug)]
+pub struct ChannelUpdate {
+ pub(crate) signature: Signature,
+ pub(crate) contents: UnsignedChannelUpdate,
+}
+
- pub struct HandleError { //TODO: rename me
++/// Used to put an error message in a LightningError
+#[derive(Clone)]
+pub enum ErrorAction {
+ /// The peer took some action which made us think they were useless. Disconnect them.
+ DisconnectPeer {
+ /// An error message which we should make an effort to send before we disconnect.
+ msg: Option<ErrorMessage>
+ },
+ /// The peer did something harmless that we weren't able to process, just log and ignore
+ IgnoreError,
+ /// The peer did something incorrect. Tell them.
+ SendErrorMessage {
+ /// The message to send.
+ msg: ErrorMessage
+ },
+}
+
+/// An Err type for failure to process messages.
- pub action: Option<ErrorAction>, //TODO: Make this required
++pub struct LightningError {
+ /// A human-readable message describing the error
+ pub err: &'static str,
+ /// The action which should be taken against the offending peer.
- fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &OpenChannel) -> Result<(), HandleError>;
++ pub action: ErrorAction,
+}
+
+/// Struct used to return values from revoke_and_ack messages, containing a bunch of commitment
+/// transaction updates if they were pending.
+#[derive(PartialEq, Clone)]
+pub struct CommitmentUpdate {
+ /// update_add_htlc messages which should be sent
+ pub update_add_htlcs: Vec<UpdateAddHTLC>,
+ /// update_fulfill_htlc messages which should be sent
+ pub update_fulfill_htlcs: Vec<UpdateFulfillHTLC>,
+ /// update_fail_htlc messages which should be sent
+ pub update_fail_htlcs: Vec<UpdateFailHTLC>,
+ /// update_fail_malformed_htlc messages which should be sent
+ pub update_fail_malformed_htlcs: Vec<UpdateFailMalformedHTLC>,
+ /// An update_fee message which should be sent
+ pub update_fee: Option<UpdateFee>,
+ /// Finally, the commitment_signed message which should be sent
+ pub commitment_signed: CommitmentSigned,
+}
+
+/// The information we received from a peer along the route of a payment we originated. This is
+/// returned by ChannelMessageHandler::handle_update_fail_htlc to be passed into
+/// RoutingMessageHandler::handle_htlc_fail_channel_update to update our network map.
+#[derive(Clone)]
+pub enum HTLCFailChannelUpdate {
+ /// We received an error which included a full ChannelUpdate message.
+ ChannelUpdateMessage {
+ /// The unwrapped message we received
+ msg: ChannelUpdate,
+ },
+ /// We received an error which indicated only that a channel has been closed
+ ChannelClosed {
+ /// The short_channel_id which has now closed.
+ short_channel_id: u64,
+ /// When this is true, this channel should be permanently removed from
+ /// consideration. Otherwise, this channel can be restored as a new channel_update is received
+ is_permanent: bool,
+ },
+ /// We received an error which indicated only that a node has failed
+ NodeFailure {
+ /// The node_id that has failed.
+ node_id: PublicKey,
+ /// When this is true, the node should be permanently removed from
+ /// consideration. Otherwise, the channels connected to this node can be
+ /// restored as a new channel_update is received
+ is_permanent: bool,
+ }
+}
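+
+// Editor's sketch (hypothetical, not part of the original patch): how a
+// RoutingMessageHandler implementation might dispatch these variants; apply_update,
+// remove_channel and remove_node are illustrative names only.
+// fn handle_htlc_fail_channel_update(&self, update: &HTLCFailChannelUpdate) {
+// 	match update {
+// 		&HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg } => self.apply_update(msg),
+// 		&HTLCFailChannelUpdate::ChannelClosed { short_channel_id, is_permanent } =>
+// 			self.remove_channel(short_channel_id, is_permanent),
+// 		&HTLCFailChannelUpdate::NodeFailure { ref node_id, is_permanent } =>
+// 			self.remove_node(node_id, is_permanent),
+// 	}
+// }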
+
+/// Messages could have optional fields to use with extended features.
+/// As we wish to serialize these differently from Option<T>s (Options get a tag byte, but
+/// OptionalField simply gets Present if there are enough bytes to read into it), we have a
+/// separate enum type for them.
+#[derive(Clone, PartialEq)]
+pub enum OptionalField<T> {
+ /// Optional field is included in message
+ Present(T),
+ /// Optional field is absent in message
+ Absent
+}
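+
+// Editor's sketch (hypothetical test, not part of the original patch) of the wire
+// difference described above: Absent contributes zero bytes (no tag byte as an
+// Option<T> would get), while Present(script) is just the script's usual
+// length-prefixed encoding, per the Writeable impl further below.
+#[cfg(test)]
+mod optional_field_encoding_sketch {
+ use super::OptionalField;
+ use bitcoin::blockdata::script::{Builder, Script};
+ use util::ser::Writeable;
+ #[test]
+ fn absent_writes_nothing() {
+   assert!(OptionalField::<Script>::Absent.encode().is_empty());
+   let empty_script = Builder::new().into_script();
+   assert_eq!(OptionalField::Present(empty_script).encode(), vec![0u8, 0u8]);
+ }
+}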
+
+/// A trait to describe an object which can receive channel messages.
+///
+/// Messages MAY be called in parallel when they originate from different their_node_ids, however
+/// they MUST NOT be called in parallel when the two calls have the same their_node_id.
+pub trait ChannelMessageHandler : events::MessageSendEventsProvider + Send + Sync {
+ //Channel init:
+ /// Handle an incoming open_channel message from the given peer.
- fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &AcceptChannel) -> Result<(), HandleError>;
++ fn handle_open_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &OpenChannel) -> Result<(), LightningError>;
+ /// Handle an incoming accept_channel message from the given peer.
- fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &FundingCreated) -> Result<(), HandleError>;
++ fn handle_accept_channel(&self, their_node_id: &PublicKey, their_local_features: LocalFeatures, msg: &AcceptChannel) -> Result<(), LightningError>;
+ /// Handle an incoming funding_created message from the given peer.
- fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &FundingSigned) -> Result<(), HandleError>;
++ fn handle_funding_created(&self, their_node_id: &PublicKey, msg: &FundingCreated) -> Result<(), LightningError>;
+ /// Handle an incoming funding_signed message from the given peer.
- fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &FundingLocked) -> Result<(), HandleError>;
++ fn handle_funding_signed(&self, their_node_id: &PublicKey, msg: &FundingSigned) -> Result<(), LightningError>;
+ /// Handle an incoming funding_locked message from the given peer.
- fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &Shutdown) -> Result<(), HandleError>;
++ fn handle_funding_locked(&self, their_node_id: &PublicKey, msg: &FundingLocked) -> Result<(), LightningError>;
+
+ // Channel close:
+ /// Handle an incoming shutdown message from the given peer.
- fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &ClosingSigned) -> Result<(), HandleError>;
++ fn handle_shutdown(&self, their_node_id: &PublicKey, msg: &Shutdown) -> Result<(), LightningError>;
+ /// Handle an incoming closing_signed message from the given peer.
- fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &UpdateAddHTLC) -> Result<(), HandleError>;
++ fn handle_closing_signed(&self, their_node_id: &PublicKey, msg: &ClosingSigned) -> Result<(), LightningError>;
+
+ // HTLC handling:
+ /// Handle an incoming update_add_htlc message from the given peer.
- fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFulfillHTLC) -> Result<(), HandleError>;
++ fn handle_update_add_htlc(&self, their_node_id: &PublicKey, msg: &UpdateAddHTLC) -> Result<(), LightningError>;
+ /// Handle an incoming update_fulfill_htlc message from the given peer.
- fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailHTLC) -> Result<(), HandleError>;
++ fn handle_update_fulfill_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFulfillHTLC) -> Result<(), LightningError>;
+ /// Handle an incoming update_fail_htlc message from the given peer.
- fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailMalformedHTLC) -> Result<(), HandleError>;
++ fn handle_update_fail_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailHTLC) -> Result<(), LightningError>;
+ /// Handle an incoming update_fail_malformed_htlc message from the given peer.
- fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &CommitmentSigned) -> Result<(), HandleError>;
++ fn handle_update_fail_malformed_htlc(&self, their_node_id: &PublicKey, msg: &UpdateFailMalformedHTLC) -> Result<(), LightningError>;
+ /// Handle an incoming commitment_signed message from the given peer.
- fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &RevokeAndACK) -> Result<(), HandleError>;
++ fn handle_commitment_signed(&self, their_node_id: &PublicKey, msg: &CommitmentSigned) -> Result<(), LightningError>;
+ /// Handle an incoming revoke_and_ack message from the given peer.
- fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &UpdateFee) -> Result<(), HandleError>;
++ fn handle_revoke_and_ack(&self, their_node_id: &PublicKey, msg: &RevokeAndACK) -> Result<(), LightningError>;
+
+ /// Handle an incoming update_fee message from the given peer.
- fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &AnnouncementSignatures) -> Result<(), HandleError>;
++ fn handle_update_fee(&self, their_node_id: &PublicKey, msg: &UpdateFee) -> Result<(), LightningError>;
+
+ // Channel-to-announce:
+ /// Handle an incoming announcement_signatures message from the given peer.
- fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish) -> Result<(), HandleError>;
++ fn handle_announcement_signatures(&self, their_node_id: &PublicKey, msg: &AnnouncementSignatures) -> Result<(), LightningError>;
+
+ // Connection loss/reestablish:
+ /// Indicates a connection to the peer failed/an existing connection was lost. If no connection
+ /// is believed to be possible in the future (eg they're sending us messages we don't
+ /// understand or indicate they require unknown feature bits), no_connection_possible is set
+ /// and any outstanding channels should be failed.
+ fn peer_disconnected(&self, their_node_id: &PublicKey, no_connection_possible: bool);
+
+ /// Handle a peer reconnecting, possibly generating channel_reestablish message(s).
+ fn peer_connected(&self, their_node_id: &PublicKey);
+ /// Handle an incoming channel_reestablish message from the given peer.
- fn handle_node_announcement(&self, msg: &NodeAnnouncement) -> Result<bool, HandleError>;
++ fn handle_channel_reestablish(&self, their_node_id: &PublicKey, msg: &ChannelReestablish) -> Result<(), LightningError>;
+
+ // Error:
+ /// Handle an incoming error message from the given peer.
+ fn handle_error(&self, their_node_id: &PublicKey, msg: &ErrorMessage);
+}
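+
+// Editor's sketch (hypothetical helper, not part of the original patch) of the
+// locking discipline the trait docs above imply for callers: serialize calls made
+// with the same their_node_id while letting distinct peers proceed in parallel.
+#[cfg(test)]
+mod handler_locking_sketch {
+ use super::*;
+ use std::collections::HashMap;
+ use std::sync::{Arc, Mutex};
+ pub struct PeerLocks {
+   locks: Mutex<HashMap<PublicKey, Arc<Mutex<()>>>>,
+ }
+ impl PeerLocks {
+   /// Fetch-or-create the per-peer lock; hold the returned mutex across a
+   /// handle_* call so same-peer messages never run concurrently.
+   pub fn lock_for(&self, their_node_id: &PublicKey) -> Arc<Mutex<()>> {
+     let mut map = self.locks.lock().unwrap();
+     map.entry(their_node_id.clone()).or_insert_with(|| Arc::new(Mutex::new(()))).clone()
+   }
+ }
+}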
+
+/// A trait to describe an object which can receive routing messages.
+pub trait RoutingMessageHandler : Send + Sync {
+ /// Handle an incoming node_announcement message, returning true if it should be forwarded on,
+ /// false if it should not be forwarded, or an Err otherwise.
- fn handle_channel_announcement(&self, msg: &ChannelAnnouncement) -> Result<bool, HandleError>;
++ fn handle_node_announcement(&self, msg: &NodeAnnouncement) -> Result<bool, LightningError>;
+ /// Handle a channel_announcement message, returning true if it should be forwarded on,
+ /// false if it should not be forwarded, or an Err otherwise.
- fn handle_channel_update(&self, msg: &ChannelUpdate) -> Result<bool, HandleError>;
++ fn handle_channel_announcement(&self, msg: &ChannelAnnouncement) -> Result<bool, LightningError>;
+ /// Handle an incoming channel_update message, returning true if it should be forwarded on,
+ /// false if it should not be forwarded, or an Err otherwise.
- impl fmt::Debug for HandleError {
++ fn handle_channel_update(&self, msg: &ChannelUpdate) -> Result<bool, LightningError>;
+ /// Handle some updates to the route graph that we learned due to an outbound failed payment.
+ fn handle_htlc_fail_channel_update(&self, update: &HTLCFailChannelUpdate);
+ /// Gets a subset of the channel announcements and updates required to dump our routing table
+ /// to a remote node, starting at the short_channel_id indicated by starting_point and
+ /// including batch_amount entries.
+ fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, ChannelUpdate, ChannelUpdate)>;
+ /// Gets a subset of the node announcements required to dump our routing table to a remote node,
+ /// starting at the node *after* the provided publickey and including batch_amount entries.
+ /// If None is provided for starting_point, we start at the first node.
+ fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<NodeAnnouncement>;
+}
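+
+// Editor's sketch (hypothetical caller, not part of the original patch) of driving
+// the batched getter above during a routing-table dump; the batch size of 8 is
+// arbitrary for illustration.
+#[cfg(test)]
+fn dump_channel_table_sketch<RM: RoutingMessageHandler>(handler: &RM) -> Vec<(ChannelAnnouncement, ChannelUpdate, ChannelUpdate)> {
+ let mut all = Vec::new();
+ let mut starting_point = 0u64;
+ loop {
+   let batch = handler.get_next_channel_announcements(starting_point, 8);
+   if batch.is_empty() { break; }
+   // Resume strictly after the last short_channel_id returned.
+   starting_point = batch.last().unwrap().0.contents.short_channel_id + 1;
+   all.extend(batch);
+ }
+ all
+}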
+
+pub(crate) struct OnionRealm0HopData {
+ pub(crate) short_channel_id: u64,
+ pub(crate) amt_to_forward: u64,
+ pub(crate) outgoing_cltv_value: u32,
+ // 12 bytes of 0-padding
+}
+
+mod fuzzy_internal_msgs {
+ // These types aren't intended to be pub, but are exposed for direct fuzzing (as we deserialize
+ // them from untrusted input):
+
+ use super::OnionRealm0HopData;
+ pub struct OnionHopData {
+ pub(crate) realm: u8,
+ pub(crate) data: OnionRealm0HopData,
+ pub(crate) hmac: [u8; 32],
+ }
+
+ pub struct DecodedOnionErrorPacket {
+ pub(crate) hmac: [u8; 32],
+ pub(crate) failuremsg: Vec<u8>,
+ pub(crate) pad: Vec<u8>,
+ }
+}
+#[cfg(feature = "fuzztarget")]
+pub use self::fuzzy_internal_msgs::*;
+#[cfg(not(feature = "fuzztarget"))]
+pub(crate) use self::fuzzy_internal_msgs::*;
+
+#[derive(Clone)]
+pub(crate) struct OnionPacket {
+ pub(crate) version: u8,
+ /// In order to ensure we always return an error on Onion decode in compliance with BOLT 4, we
+ /// have to deserialize OnionPackets contained in UpdateAddHTLCs even if the ephemeral public
+ /// key (here) is bogus, so we hold a Result instead of a PublicKey as we'd like.
+ pub(crate) public_key: Result<PublicKey, secp256k1::Error>,
+ pub(crate) hop_data: [u8; 20*65],
+ pub(crate) hmac: [u8; 32],
+}
+
+impl PartialEq for OnionPacket {
+ fn eq(&self, other: &OnionPacket) -> bool {
+ for (i, j) in self.hop_data.iter().zip(other.hop_data.iter()) {
+ if i != j { return false; }
+ }
+ self.version == other.version &&
+ self.public_key == other.public_key &&
+ self.hmac == other.hmac
+ }
+}
+
+#[derive(Clone, PartialEq)]
+pub(crate) struct OnionErrorPacket {
+ // This really should be a constant size slice, but the spec lets these things be up to 128KB?
+ // (TODO) We limit it in decode to much lower...
+ pub(crate) data: Vec<u8>,
+}
+
+impl Error for DecodeError {
+ fn description(&self) -> &str {
+ match *self {
+ DecodeError::UnknownVersion => "Unknown realm byte in Onion packet",
+ DecodeError::UnknownRequiredFeature => "Unknown required feature preventing decode",
+ DecodeError::InvalidValue => "Nonsense bytes didn't map to the type they were interpreted as",
+ DecodeError::ShortRead => "Packet extended beyond the provided bytes",
+ DecodeError::ExtraAddressesPerType => "More than one address of a single type",
+ DecodeError::BadLengthDescriptor => "A length descriptor in the packet didn't describe the later data correctly",
+ DecodeError::Io(ref e) => e.description(),
+ }
+ }
+}
+impl fmt::Display for DecodeError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.write_str(self.description())
+ }
+}
+
++impl fmt::Debug for LightningError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ f.write_str(self.err)
+ }
+}
+
+impl From<::std::io::Error> for DecodeError {
+ fn from(e: ::std::io::Error) -> Self {
+ if e.kind() == ::std::io::ErrorKind::UnexpectedEof {
+ DecodeError::ShortRead
+ } else {
+ DecodeError::Io(e)
+ }
+ }
+}
+
+impl Writeable for OptionalField<Script> {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ match *self {
+ OptionalField::Present(ref script) => {
+ // Note that Writeable for script includes the 16-bit length tag for us
+ script.write(w)?;
+ },
+ OptionalField::Absent => {}
+ }
+ Ok(())
+ }
+}
+
+impl<R: Read> Readable<R> for OptionalField<Script> {
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ match <u16 as Readable<R>>::read(r) {
+ Ok(len) => {
+ let mut buf = vec![0; len as usize];
+ r.read_exact(&mut buf)?;
+ Ok(OptionalField::Present(Script::from(buf)))
+ },
+ Err(DecodeError::ShortRead) => Ok(OptionalField::Absent),
+ Err(e) => Err(e)
+ }
+ }
+}
+
+impl_writeable_len_match!(AcceptChannel, {
+ {AcceptChannel{ shutdown_scriptpubkey: OptionalField::Present(ref script), .. }, 270 + 2 + script.len()},
+ {_, 270}
+ }, {
+ temporary_channel_id,
+ dust_limit_satoshis,
+ max_htlc_value_in_flight_msat,
+ channel_reserve_satoshis,
+ htlc_minimum_msat,
+ minimum_depth,
+ to_self_delay,
+ max_accepted_htlcs,
+ funding_pubkey,
+ revocation_basepoint,
+ payment_basepoint,
+ delayed_payment_basepoint,
+ htlc_basepoint,
+ first_per_commitment_point,
+ shutdown_scriptpubkey
+});
+
+impl_writeable!(AnnouncementSignatures, 32+8+64*2, {
+ channel_id,
+ short_channel_id,
+ node_signature,
+ bitcoin_signature
+});
+
+impl Writeable for ChannelReestablish {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ w.size_hint(if let OptionalField::Present(..) = self.data_loss_protect { 32+2*8+33+32 } else { 32+2*8 });
+ self.channel_id.write(w)?;
+ self.next_local_commitment_number.write(w)?;
+ self.next_remote_commitment_number.write(w)?;
+ match self.data_loss_protect {
+ OptionalField::Present(ref data_loss_protect) => {
+ (*data_loss_protect).your_last_per_commitment_secret.write(w)?;
+ (*data_loss_protect).my_current_per_commitment_point.write(w)?;
+ },
+ OptionalField::Absent => {}
+ }
+ Ok(())
+ }
+}
+
+impl<R: Read> Readable<R> for ChannelReestablish{
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ Ok(Self {
+ channel_id: Readable::read(r)?,
+ next_local_commitment_number: Readable::read(r)?,
+ next_remote_commitment_number: Readable::read(r)?,
+ data_loss_protect: {
+ match <[u8; 32] as Readable<R>>::read(r) {
+ Ok(your_last_per_commitment_secret) =>
+ OptionalField::Present(DataLossProtect {
+ your_last_per_commitment_secret,
+ my_current_per_commitment_point: Readable::read(r)?,
+ }),
+ Err(DecodeError::ShortRead) => OptionalField::Absent,
+ Err(e) => return Err(e)
+ }
+ }
+ })
+ }
+}
+
+impl_writeable!(ClosingSigned, 32+8+64, {
+ channel_id,
+ fee_satoshis,
+ signature
+});
+
+impl_writeable_len_match!(CommitmentSigned, {
+ { CommitmentSigned { ref htlc_signatures, .. }, 32+64+2+htlc_signatures.len()*64 }
+ }, {
+ channel_id,
+ signature,
+ htlc_signatures
+});
+
+impl_writeable_len_match!(DecodedOnionErrorPacket, {
+ { DecodedOnionErrorPacket { ref failuremsg, ref pad, .. }, 32 + 4 + failuremsg.len() + pad.len() }
+ }, {
+ hmac,
+ failuremsg,
+ pad
+});
+
+impl_writeable!(FundingCreated, 32+32+2+64, {
+ temporary_channel_id,
+ funding_txid,
+ funding_output_index,
+ signature
+});
+
+impl_writeable!(FundingSigned, 32+64, {
+ channel_id,
+ signature
+});
+
+impl_writeable!(FundingLocked, 32+33, {
+ channel_id,
+ next_per_commitment_point
+});
+
+impl_writeable_len_match!(GlobalFeatures, {
+ { GlobalFeatures { ref flags }, flags.len() + 2 }
+ }, {
+ flags
+});
+
+impl_writeable_len_match!(LocalFeatures, {
+ { LocalFeatures { ref flags }, flags.len() + 2 }
+ }, {
+ flags
+});
+
+impl_writeable_len_match!(Init, {
+ { Init { ref global_features, ref local_features }, global_features.flags.len() + local_features.flags.len() + 4 }
+ }, {
+ global_features,
+ local_features
+});
+
+impl_writeable_len_match!(OpenChannel, {
+ { OpenChannel { shutdown_scriptpubkey: OptionalField::Present(ref script), .. }, 319 + 2 + script.len() },
+ { _, 319 }
+ }, {
+ chain_hash,
+ temporary_channel_id,
+ funding_satoshis,
+ push_msat,
+ dust_limit_satoshis,
+ max_htlc_value_in_flight_msat,
+ channel_reserve_satoshis,
+ htlc_minimum_msat,
+ feerate_per_kw,
+ to_self_delay,
+ max_accepted_htlcs,
+ funding_pubkey,
+ revocation_basepoint,
+ payment_basepoint,
+ delayed_payment_basepoint,
+ htlc_basepoint,
+ first_per_commitment_point,
+ channel_flags,
+ shutdown_scriptpubkey
+});
+
+impl_writeable!(RevokeAndACK, 32+32+33, {
+ channel_id,
+ per_commitment_secret,
+ next_per_commitment_point
+});
+
+impl_writeable_len_match!(Shutdown, {
+ { Shutdown { ref scriptpubkey, .. }, 32 + 2 + scriptpubkey.len() }
+ }, {
+ channel_id,
+ scriptpubkey
+});
+
+impl_writeable_len_match!(UpdateFailHTLC, {
+ { UpdateFailHTLC { ref reason, .. }, 32 + 10 + reason.data.len() }
+ }, {
+ channel_id,
+ htlc_id,
+ reason
+});
+
+impl_writeable!(UpdateFailMalformedHTLC, 32+8+32+2, {
+ channel_id,
+ htlc_id,
+ sha256_of_onion,
+ failure_code
+});
+
+impl_writeable!(UpdateFee, 32+4, {
+ channel_id,
+ feerate_per_kw
+});
+
+impl_writeable!(UpdateFulfillHTLC, 32+8+32, {
+ channel_id,
+ htlc_id,
+ payment_preimage
+});
+
+impl_writeable_len_match!(OnionErrorPacket, {
+ { OnionErrorPacket { ref data, .. }, 2 + data.len() }
+ }, {
+ data
+});
+
+impl Writeable for OnionPacket {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ w.size_hint(1 + 33 + 20*65 + 32);
+ self.version.write(w)?;
+ match self.public_key {
+ Ok(pubkey) => pubkey.write(w)?,
+ Err(_) => [0u8;33].write(w)?,
+ }
+ w.write_all(&self.hop_data)?;
+ self.hmac.write(w)?;
+ Ok(())
+ }
+}
+
+impl<R: Read> Readable<R> for OnionPacket {
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ Ok(OnionPacket {
+ version: Readable::read(r)?,
+ public_key: {
+ let mut buf = [0u8;33];
+ r.read_exact(&mut buf)?;
+ PublicKey::from_slice(&buf)
+ },
+ hop_data: Readable::read(r)?,
+ hmac: Readable::read(r)?,
+ })
+ }
+}
+
+impl_writeable!(UpdateAddHTLC, 32+8+8+32+4+1366, {
+ channel_id,
+ htlc_id,
+ amount_msat,
+ payment_hash,
+ cltv_expiry,
+ onion_routing_packet
+});
+
+impl Writeable for OnionRealm0HopData {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ w.size_hint(32);
+ self.short_channel_id.write(w)?;
+ self.amt_to_forward.write(w)?;
+ self.outgoing_cltv_value.write(w)?;
+ w.write_all(&[0;12])?;
+ Ok(())
+ }
+}
+
+impl<R: Read> Readable<R> for OnionRealm0HopData {
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ Ok(OnionRealm0HopData {
+ short_channel_id: Readable::read(r)?,
+ amt_to_forward: Readable::read(r)?,
+ outgoing_cltv_value: {
+ let v: u32 = Readable::read(r)?;
+ r.read_exact(&mut [0; 12])?;
+ v
+ }
+ })
+ }
+}
+
+impl Writeable for OnionHopData {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ w.size_hint(65);
+ self.realm.write(w)?;
+ self.data.write(w)?;
+ self.hmac.write(w)?;
+ Ok(())
+ }
+}
+
+impl<R: Read> Readable<R> for OnionHopData {
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ Ok(OnionHopData {
+ realm: {
+ let r: u8 = Readable::read(r)?;
+ if r != 0 {
+ return Err(DecodeError::UnknownVersion);
+ }
+ r
+ },
+ data: Readable::read(r)?,
+ hmac: Readable::read(r)?,
+ })
+ }
+}
+
+impl Writeable for Ping {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ w.size_hint(self.byteslen as usize + 4);
+ self.ponglen.write(w)?;
+ vec![0u8; self.byteslen as usize].write(w)?; // size-unchecked write
+ Ok(())
+ }
+}
+
+impl<R: Read> Readable<R> for Ping {
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ Ok(Ping {
+ ponglen: Readable::read(r)?,
+ byteslen: {
+ let byteslen = Readable::read(r)?;
+ r.read_exact(&mut vec![0u8; byteslen as usize][..])?;
+ byteslen
+ }
+ })
+ }
+}
+
+impl Writeable for Pong {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ w.size_hint(self.byteslen as usize + 2);
+ vec![0u8; self.byteslen as usize].write(w)?; // size-unchecked write
+ Ok(())
+ }
+}
+
+impl<R: Read> Readable<R> for Pong {
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ Ok(Pong {
+ byteslen: {
+ let byteslen = Readable::read(r)?;
+ r.read_exact(&mut vec![0u8; byteslen as usize][..])?;
+ byteslen
+ }
+ })
+ }
+}
+
+impl Writeable for UnsignedChannelAnnouncement {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ w.size_hint(2 + 2*32 + 4*33 + self.features.flags.len() + self.excess_data.len());
+ self.features.write(w)?;
+ self.chain_hash.write(w)?;
+ self.short_channel_id.write(w)?;
+ self.node_id_1.write(w)?;
+ self.node_id_2.write(w)?;
+ self.bitcoin_key_1.write(w)?;
+ self.bitcoin_key_2.write(w)?;
+ w.write_all(&self.excess_data[..])?;
+ Ok(())
+ }
+}
+
+impl<R: Read> Readable<R> for UnsignedChannelAnnouncement {
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ Ok(Self {
+ features: {
+ let f: GlobalFeatures = Readable::read(r)?;
+ if f.requires_unknown_bits() {
+ return Err(DecodeError::UnknownRequiredFeature);
+ }
+ f
+ },
+ chain_hash: Readable::read(r)?,
+ short_channel_id: Readable::read(r)?,
+ node_id_1: Readable::read(r)?,
+ node_id_2: Readable::read(r)?,
+ bitcoin_key_1: Readable::read(r)?,
+ bitcoin_key_2: Readable::read(r)?,
+ excess_data: {
+ let mut excess_data = vec![];
+ r.read_to_end(&mut excess_data)?;
+ excess_data
+ },
+ })
+ }
+}
+
+impl_writeable_len_match!(ChannelAnnouncement, {
+ { ChannelAnnouncement { contents: UnsignedChannelAnnouncement {ref features, ref excess_data, ..}, .. },
+ 2 + 2*32 + 4*33 + features.flags.len() + excess_data.len() + 4*64 }
+ }, {
+ node_signature_1,
+ node_signature_2,
+ bitcoin_signature_1,
+ bitcoin_signature_2,
+ contents
+});
+
+impl Writeable for UnsignedChannelUpdate {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ w.size_hint(64 + self.excess_data.len());
+ self.chain_hash.write(w)?;
+ self.short_channel_id.write(w)?;
+ self.timestamp.write(w)?;
+ self.flags.write(w)?;
+ self.cltv_expiry_delta.write(w)?;
+ self.htlc_minimum_msat.write(w)?;
+ self.fee_base_msat.write(w)?;
+ self.fee_proportional_millionths.write(w)?;
+ w.write_all(&self.excess_data[..])?;
+ Ok(())
+ }
+}
+
+impl<R: Read> Readable<R> for UnsignedChannelUpdate {
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ Ok(Self {
+ chain_hash: Readable::read(r)?,
+ short_channel_id: Readable::read(r)?,
+ timestamp: Readable::read(r)?,
+ flags: Readable::read(r)?,
+ cltv_expiry_delta: Readable::read(r)?,
+ htlc_minimum_msat: Readable::read(r)?,
+ fee_base_msat: Readable::read(r)?,
+ fee_proportional_millionths: Readable::read(r)?,
+ excess_data: {
+ let mut excess_data = vec![];
+ r.read_to_end(&mut excess_data)?;
+ excess_data
+ },
+ })
+ }
+}
+
+impl_writeable_len_match!(ChannelUpdate, {
+ { ChannelUpdate { contents: UnsignedChannelUpdate {ref excess_data, ..}, .. },
+ 64 + excess_data.len() + 64 }
+ }, {
+ signature,
+ contents
+});
+
+impl Writeable for ErrorMessage {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ w.size_hint(32 + 2 + self.data.len());
+ self.channel_id.write(w)?;
+ (self.data.len() as u16).write(w)?;
+ w.write_all(self.data.as_bytes())?;
+ Ok(())
+ }
+}
+
+impl<R: Read> Readable<R> for ErrorMessage {
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ Ok(Self {
+ channel_id: Readable::read(r)?,
+ data: {
+ let mut sz: usize = <u16 as Readable<R>>::read(r)? as usize;
+ let mut data = vec![];
+ let data_len = r.read_to_end(&mut data)?;
+ // The stream may carry fewer bytes than the declared length; only take
+ // what was actually read.
+ sz = cmp::min(data_len, sz);
+ match String::from_utf8(data[..sz].to_vec()) {
+ Ok(s) => s,
+ Err(_) => return Err(DecodeError::InvalidValue),
+ }
+ }
+ })
+ }
+}
+
+impl Writeable for UnsignedNodeAnnouncement {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), ::std::io::Error> {
+ w.size_hint(64 + 76 + self.features.flags.len() + self.addresses.len()*38 + self.excess_address_data.len() + self.excess_data.len());
+ self.features.write(w)?;
+ self.timestamp.write(w)?;
+ self.node_id.write(w)?;
+ w.write_all(&self.rgb)?;
+ self.alias.write(w)?;
+
+ let mut addrs_to_encode = self.addresses.clone();
+ addrs_to_encode.sort_unstable_by(|a, b| { a.get_id().cmp(&b.get_id()) });
+ addrs_to_encode.dedup_by(|a, b| { a.get_id() == b.get_id() });
+ let mut addr_len = 0;
+ for addr in &addrs_to_encode {
+ addr_len += 1 + addr.len();
+ }
+ (addr_len + self.excess_address_data.len() as u16).write(w)?;
+ for addr in addrs_to_encode {
+ addr.write(w)?;
+ }
+ w.write_all(&self.excess_address_data[..])?;
+ w.write_all(&self.excess_data[..])?;
+ Ok(())
+ }
+}
+
+impl<R: Read> Readable<R> for UnsignedNodeAnnouncement {
+ fn read(r: &mut R) -> Result<Self, DecodeError> {
+ let features: GlobalFeatures = Readable::read(r)?;
+ if features.requires_unknown_bits() {
+ return Err(DecodeError::UnknownRequiredFeature);
+ }
+ let timestamp: u32 = Readable::read(r)?;
+ let node_id: PublicKey = Readable::read(r)?;
+ let mut rgb = [0; 3];
+ r.read_exact(&mut rgb)?;
+ let alias: [u8; 32] = Readable::read(r)?;
+
+ let addr_len: u16 = Readable::read(r)?;
+ let mut addresses: Vec<NetAddress> = Vec::with_capacity(4);
+ let mut addr_readpos = 0;
+ let mut excess = false;
+ let mut excess_byte = 0;
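+ // Addresses may appear at most once per type and must be in ascending
+ // descriptor order. An unrecognized descriptor byte ends parsing; it is
+ // stashed and re-attached below at the front of the excess (address) data.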
+ loop {
+ if addr_len <= addr_readpos { break; }
+ match Readable::read(r) {
+ Ok(Ok(addr)) => {
+ match addr {
+ NetAddress::IPv4 { .. } => {
+ if addresses.len() > 0 {
+ return Err(DecodeError::ExtraAddressesPerType);
+ }
+ },
+ NetAddress::IPv6 { .. } => {
+ if addresses.len() > 1 || (addresses.len() == 1 && addresses[0].get_id() != 1) {
+ return Err(DecodeError::ExtraAddressesPerType);
+ }
+ },
+ NetAddress::OnionV2 { .. } => {
+ if addresses.len() > 2 || (addresses.len() > 0 && addresses.last().unwrap().get_id() > 2) {
+ return Err(DecodeError::ExtraAddressesPerType);
+ }
+ },
+ NetAddress::OnionV3 { .. } => {
+ if addresses.len() > 3 || (addresses.len() > 0 && addresses.last().unwrap().get_id() > 3) {
+ return Err(DecodeError::ExtraAddressesPerType);
+ }
+ },
+ }
+ if addr_len < addr_readpos + 1 + addr.len() {
+ return Err(DecodeError::BadLengthDescriptor);
+ }
+ addr_readpos += (1 + addr.len()) as u16;
+ addresses.push(addr);
+ },
+ Ok(Err(unknown_descriptor)) => {
+ excess = true;
+ excess_byte = unknown_descriptor;
+ break;
+ },
+ Err(DecodeError::ShortRead) => return Err(DecodeError::BadLengthDescriptor),
+ Err(e) => return Err(e),
+ }
+ }
+
+ let mut excess_data = vec![];
+ let excess_address_data = if addr_readpos < addr_len {
+ let mut excess_address_data = vec![0; (addr_len - addr_readpos) as usize];
+ r.read_exact(&mut excess_address_data[if excess { 1 } else { 0 }..])?;
+ if excess {
+ excess_address_data[0] = excess_byte;
+ }
+ excess_address_data
+ } else {
+ if excess {
+ excess_data.push(excess_byte);
+ }
+ Vec::new()
+ };
+ r.read_to_end(&mut excess_data)?;
+ Ok(UnsignedNodeAnnouncement {
+ features,
+ timestamp,
+ node_id,
+ rgb,
+ alias,
+ addresses,
+ excess_address_data,
+ excess_data,
+ })
+ }
+}
+
+impl_writeable_len_match!(NodeAnnouncement, {
+ { NodeAnnouncement { contents: UnsignedNodeAnnouncement { ref features, ref addresses, ref excess_address_data, ref excess_data, ..}, .. },
+ 64 + 76 + features.flags.len() + addresses.len()*38 + excess_address_data.len() + excess_data.len() }
+ }, {
+ signature,
+ contents
+});
+
+#[cfg(test)]
+mod tests {
+ use hex;
+ use ln::msgs;
+ use ln::msgs::{GlobalFeatures, LocalFeatures, OptionalField, OnionErrorPacket};
+ use ln::channelmanager::{PaymentPreimage, PaymentHash};
+ use util::ser::Writeable;
+
+ use bitcoin_hashes::sha256d::Hash as Sha256dHash;
+ use bitcoin_hashes::hex::FromHex;
+ use bitcoin::util::address::Address;
+ use bitcoin::network::constants::Network;
+ use bitcoin::blockdata::script::Builder;
+ use bitcoin::blockdata::opcodes;
+
+ use secp256k1::key::{PublicKey,SecretKey};
+ use secp256k1::{Secp256k1, Message};
+
+ #[test]
+ fn encoding_channel_reestablish_no_secret() {
+ let cr = msgs::ChannelReestablish {
+ channel_id: [4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0],
+ next_local_commitment_number: 3,
+ next_remote_commitment_number: 4,
+ data_loss_protect: OptionalField::Absent,
+ };
+
+ let encoded_value = cr.encode();
+ assert_eq!(
+ encoded_value,
+ vec![4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4]
+ );
+ }
+
+ #[test]
+ fn encoding_channel_reestablish_with_secret() {
+ let public_key = {
+ let secp_ctx = Secp256k1::new();
+ PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap())
+ };
+
+ let cr = msgs::ChannelReestablish {
+ channel_id: [4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0],
+ next_local_commitment_number: 3,
+ next_remote_commitment_number: 4,
+ data_loss_protect: OptionalField::Present(msgs::DataLossProtect { your_last_per_commitment_secret: [9;32], my_current_per_commitment_point: public_key}),
+ };
+
+ let encoded_value = cr.encode();
+ assert_eq!(
+ encoded_value,
+ vec![4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 3, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143]
+ );
+ }
+
+ macro_rules! get_keys_from {
+ ($slice: expr, $secp_ctx: expr) => {
+ {
+ let privkey = SecretKey::from_slice(&hex::decode($slice).unwrap()[..]).unwrap();
+ let pubkey = PublicKey::from_secret_key(&$secp_ctx, &privkey);
+ (privkey, pubkey)
+ }
+ }
+ }
+
+ macro_rules! get_sig_on {
+ ($privkey: expr, $ctx: expr, $string: expr) => {
+ {
+ let sighash = Message::from_slice(&$string.into_bytes()[..]).unwrap();
+ $ctx.sign(&sighash, &$privkey)
+ }
+ }
+ }
+
+ #[test]
+ fn encoding_announcement_signatures() {
+ let secp_ctx = Secp256k1::new();
+ let (privkey, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let sig_1 = get_sig_on!(privkey, secp_ctx, String::from("01010101010101010101010101010101"));
+ let sig_2 = get_sig_on!(privkey, secp_ctx, String::from("02020202020202020202020202020202"));
+ let announcement_signatures = msgs::AnnouncementSignatures {
+ channel_id: [4, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0],
+ short_channel_id: 2316138423780173,
+ node_signature: sig_1,
+ bitcoin_signature: sig_2,
+ };
+
+ let encoded_value = announcement_signatures.encode();
+ assert_eq!(encoded_value, hex::decode("040000000000000005000000000000000600000000000000070000000000000000083a840000034dd977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073acf9953cef4700860f5967838eba2bae89288ad188ebf8b20bf995c3ea53a26df1876d0a3a0e13172ba286a673140190c02ba9da60a2e43a745188c8a83c7f3ef").unwrap());
+ }
+
+ fn do_encoding_channel_announcement(unknown_features_bits: bool, non_bitcoin_chain_hash: bool, excess_data: bool) {
+ let secp_ctx = Secp256k1::new();
+ let (privkey_1, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let (privkey_2, pubkey_2) = get_keys_from!("0202020202020202020202020202020202020202020202020202020202020202", secp_ctx);
+ let (privkey_3, pubkey_3) = get_keys_from!("0303030303030303030303030303030303030303030303030303030303030303", secp_ctx);
+ let (privkey_4, pubkey_4) = get_keys_from!("0404040404040404040404040404040404040404040404040404040404040404", secp_ctx);
+ let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
+ let sig_2 = get_sig_on!(privkey_2, secp_ctx, String::from("01010101010101010101010101010101"));
+ let sig_3 = get_sig_on!(privkey_3, secp_ctx, String::from("01010101010101010101010101010101"));
+ let sig_4 = get_sig_on!(privkey_4, secp_ctx, String::from("01010101010101010101010101010101"));
+ let mut features = GlobalFeatures::new();
+ if unknown_features_bits {
+ features.flags = vec![0xFF, 0xFF];
+ }
+ let unsigned_channel_announcement = msgs::UnsignedChannelAnnouncement {
+ features,
+ chain_hash: if !non_bitcoin_chain_hash { Sha256dHash::from_hex("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000").unwrap() } else { Sha256dHash::from_hex("000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943").unwrap() },
+ short_channel_id: 2316138423780173,
+ node_id_1: pubkey_1,
+ node_id_2: pubkey_2,
+ bitcoin_key_1: pubkey_3,
+ bitcoin_key_2: pubkey_4,
+ excess_data: if excess_data { vec![10, 0, 0, 20, 0, 0, 30, 0, 0, 40] } else { Vec::new() },
+ };
+ let channel_announcement = msgs::ChannelAnnouncement {
+ node_signature_1: sig_1,
+ node_signature_2: sig_2,
+ bitcoin_signature_1: sig_3,
+ bitcoin_signature_2: sig_4,
+ contents: unsigned_channel_announcement,
+ };
+ let encoded_value = channel_announcement.encode();
+ let mut target_value = hex::decode("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a1735b6a427e80d5fe7cd90a2f4ee08dc9c27cda7c35a4172e5d85b12c49d4232537e98f9b1f3c5e6989a8b9644e90e8918127680dbd0d4043510840fc0f1e11a216c280b5395a2546e7e4b2663e04f811622f15a4f91e83aa2e92ba2a573c139142c54ae63072a1ec1ee7dc0c04bde5c847806172aa05c92c22ae8e308d1d2692b12cc195ce0a2d1bda6a88befa19fa07f51caa75ce83837f28965600b8aacab0855ffb0e741ec5f7c41421e9829a9d48611c8c831f71be5ea73e66594977ffd").unwrap();
+ if unknown_features_bits {
+ target_value.append(&mut hex::decode("0002ffff").unwrap());
+ } else {
+ target_value.append(&mut hex::decode("0000").unwrap());
+ }
+ if non_bitcoin_chain_hash {
+ target_value.append(&mut hex::decode("43497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea330900000000").unwrap());
+ } else {
+ target_value.append(&mut hex::decode("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f").unwrap());
+ }
+ target_value.append(&mut hex::decode("00083a840000034d031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f024d4b6cd1361032ca9bd2aeb9d900aa4d45d9ead80ac9423374c451a7254d076602531fe6068134503d2723133227c867ac8fa6c83c537e9a44c3c5bdbdcb1fe33703462779ad4aad39514614751a71085f2f10e1c7a593e4e030efb5b8721ce55b0b").unwrap());
+ if excess_data {
+ target_value.append(&mut hex::decode("0a00001400001e000028").unwrap());
+ }
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_channel_announcement() {
+ do_encoding_channel_announcement(false, false, false);
+ do_encoding_channel_announcement(true, false, false);
+ do_encoding_channel_announcement(true, true, false);
+ do_encoding_channel_announcement(true, true, true);
+ do_encoding_channel_announcement(false, true, true);
+ do_encoding_channel_announcement(false, false, true);
+ do_encoding_channel_announcement(false, true, false);
+ do_encoding_channel_announcement(true, false, true);
+ }
+
+ fn do_encoding_node_announcement(unknown_features_bits: bool, ipv4: bool, ipv6: bool, onionv2: bool, onionv3: bool, excess_address_data: bool, excess_data: bool) {
+ let secp_ctx = Secp256k1::new();
+ let (privkey_1, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
+ let mut features = GlobalFeatures::new();
+ if unknown_features_bits {
+ features.flags = vec![0xFF, 0xFF];
+ }
+ let mut addresses = Vec::new();
+ if ipv4 {
+ addresses.push(msgs::NetAddress::IPv4 {
+ addr: [255, 254, 253, 252],
+ port: 9735
+ });
+ }
+ if ipv6 {
+ addresses.push(msgs::NetAddress::IPv6 {
+ addr: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240],
+ port: 9735
+ });
+ }
+ if onionv2 {
+ addresses.push(msgs::NetAddress::OnionV2 {
+ addr: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246],
+ port: 9735
+ });
+ }
+ if onionv3 {
+ addresses.push(msgs::NetAddress::OnionV3 {
+ ed25519_pubkey: [255, 254, 253, 252, 251, 250, 249, 248, 247, 246, 245, 244, 243, 242, 241, 240, 239, 238, 237, 236, 235, 234, 233, 232, 231, 230, 229, 228, 227, 226, 225, 224],
+ checksum: 32,
+ version: 16,
+ port: 9735
+ });
+ }
+ let mut addr_len = 0;
+ for addr in &addresses {
+ addr_len += addr.len() + 1;
+ }
+ let unsigned_node_announcement = msgs::UnsignedNodeAnnouncement {
+ features,
+ timestamp: 20190119,
+ node_id: pubkey_1,
+ rgb: [32; 3],
+ alias: [16;32],
+ addresses,
+ excess_address_data: if excess_address_data { vec![33, 108, 40, 11, 83, 149, 162, 84, 110, 126, 75, 38, 99, 224, 79, 129, 22, 34, 241, 90, 79, 146, 232, 58, 162, 233, 43, 162, 165, 115, 193, 57, 20, 44, 84, 174, 99, 7, 42, 30, 193, 238, 125, 192, 192, 75, 222, 92, 132, 120, 6, 23, 42, 160, 92, 146, 194, 42, 232, 227, 8, 209, 210, 105] } else { Vec::new() },
+ excess_data: if excess_data { vec![59, 18, 204, 25, 92, 224, 162, 209, 189, 166, 168, 139, 239, 161, 159, 160, 127, 81, 202, 167, 92, 232, 56, 55, 242, 137, 101, 96, 11, 138, 172, 171, 8, 85, 255, 176, 231, 65, 236, 95, 124, 65, 66, 30, 152, 41, 169, 212, 134, 17, 200, 200, 49, 247, 27, 229, 234, 115, 230, 101, 148, 151, 127, 253] } else { Vec::new() },
+ };
+ addr_len += unsigned_node_announcement.excess_address_data.len() as u16;
+ let node_announcement = msgs::NodeAnnouncement {
+ signature: sig_1,
+ contents: unsigned_node_announcement,
+ };
+ let encoded_value = node_announcement.encode();
+ let mut target_value = hex::decode("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap();
+ if unknown_features_bits {
+ target_value.append(&mut hex::decode("0002ffff").unwrap());
+ } else {
+ target_value.append(&mut hex::decode("0000").unwrap());
+ }
+ target_value.append(&mut hex::decode("013413a7031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f2020201010101010101010101010101010101010101010101010101010101010101010").unwrap());
+ target_value.append(&mut vec![(addr_len >> 8) as u8, addr_len as u8]);
+ if ipv4 {
+ target_value.append(&mut hex::decode("01fffefdfc2607").unwrap());
+ }
+ if ipv6 {
+ target_value.append(&mut hex::decode("02fffefdfcfbfaf9f8f7f6f5f4f3f2f1f02607").unwrap());
+ }
+ if onionv2 {
+ target_value.append(&mut hex::decode("03fffefdfcfbfaf9f8f7f62607").unwrap());
+ }
+ if onionv3 {
+ target_value.append(&mut hex::decode("04fffefdfcfbfaf9f8f7f6f5f4f3f2f1f0efeeedecebeae9e8e7e6e5e4e3e2e1e00020102607").unwrap());
+ }
+ if excess_address_data {
+ target_value.append(&mut hex::decode("216c280b5395a2546e7e4b2663e04f811622f15a4f92e83aa2e92ba2a573c139142c54ae63072a1ec1ee7dc0c04bde5c847806172aa05c92c22ae8e308d1d269").unwrap());
+ }
+ if excess_data {
+ target_value.append(&mut hex::decode("3b12cc195ce0a2d1bda6a88befa19fa07f51caa75ce83837f28965600b8aacab0855ffb0e741ec5f7c41421e9829a9d48611c8c831f71be5ea73e66594977ffd").unwrap());
+ }
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_node_announcement() {
+ do_encoding_node_announcement(true, true, true, true, true, true, true);
+ do_encoding_node_announcement(false, false, false, false, false, false, false);
+ do_encoding_node_announcement(false, true, false, false, false, false, false);
+ do_encoding_node_announcement(false, false, true, false, false, false, false);
+ do_encoding_node_announcement(false, false, false, true, false, false, false);
+ do_encoding_node_announcement(false, false, false, false, true, false, false);
+ do_encoding_node_announcement(false, false, false, false, false, true, false);
+ do_encoding_node_announcement(false, true, false, true, false, true, false);
+ do_encoding_node_announcement(false, false, true, false, true, false, false);
+ }
+
+ fn do_encoding_channel_update(non_bitcoin_chain_hash: bool, direction: bool, disable: bool, htlc_maximum_msat: bool) {
+ let secp_ctx = Secp256k1::new();
+ let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
+ let unsigned_channel_update = msgs::UnsignedChannelUpdate {
+ chain_hash: if !non_bitcoin_chain_hash { Sha256dHash::from_hex("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000").unwrap() } else { Sha256dHash::from_hex("000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943").unwrap() },
+ short_channel_id: 2316138423780173,
+ timestamp: 20190119,
+ flags: if direction { 1 } else { 0 } | if disable { 1 << 1 } else { 0 } | if htlc_maximum_msat { 1 << 8 } else { 0 },
+ cltv_expiry_delta: 144,
+ htlc_minimum_msat: 1000000,
+ fee_base_msat: 10000,
+ fee_proportional_millionths: 20,
+ excess_data: if htlc_maximum_msat { vec![0, 0, 0, 0, 59, 154, 202, 0] } else { Vec::new() }
+ };
+ let channel_update = msgs::ChannelUpdate {
+ signature: sig_1,
+ contents: unsigned_channel_update
+ };
+ let encoded_value = channel_update.encode();
+ let mut target_value = hex::decode("d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap();
+ if non_bitcoin_chain_hash {
+ target_value.append(&mut hex::decode("43497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea330900000000").unwrap());
+ } else {
+ target_value.append(&mut hex::decode("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f").unwrap());
+ }
+ target_value.append(&mut hex::decode("00083a840000034d013413a7").unwrap());
+ if htlc_maximum_msat {
+ target_value.append(&mut hex::decode("01").unwrap());
+ } else {
+ target_value.append(&mut hex::decode("00").unwrap());
+ }
+ target_value.append(&mut hex::decode("00").unwrap());
+ if direction {
+ let flag = target_value.last_mut().unwrap();
+ *flag = 1;
+ }
+ if disable {
+ let flag = target_value.last_mut().unwrap();
+ *flag = *flag | 1 << 1;
+ }
+ target_value.append(&mut hex::decode("009000000000000f42400000271000000014").unwrap());
+ if htlc_maximum_msat {
+ target_value.append(&mut hex::decode("000000003b9aca00").unwrap());
+ }
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_channel_update() {
+ do_encoding_channel_update(false, false, false, false);
+ do_encoding_channel_update(true, false, false, false);
+ do_encoding_channel_update(false, true, false, false);
+ do_encoding_channel_update(false, false, true, false);
+ do_encoding_channel_update(false, false, false, true);
+ do_encoding_channel_update(true, true, true, true);
+ }
+
+ fn do_encoding_open_channel(non_bitcoin_chain_hash: bool, random_bit: bool, shutdown: bool) {
+ let secp_ctx = Secp256k1::new();
+ let (_, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let (_, pubkey_2) = get_keys_from!("0202020202020202020202020202020202020202020202020202020202020202", secp_ctx);
+ let (_, pubkey_3) = get_keys_from!("0303030303030303030303030303030303030303030303030303030303030303", secp_ctx);
+ let (_, pubkey_4) = get_keys_from!("0404040404040404040404040404040404040404040404040404040404040404", secp_ctx);
+ let (_, pubkey_5) = get_keys_from!("0505050505050505050505050505050505050505050505050505050505050505", secp_ctx);
+ let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx);
+ let open_channel = msgs::OpenChannel {
+ chain_hash: if !non_bitcoin_chain_hash { Sha256dHash::from_hex("6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000").unwrap() } else { Sha256dHash::from_hex("000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943").unwrap() },
+ temporary_channel_id: [2; 32],
+ funding_satoshis: 1311768467284833366,
+ push_msat: 2536655962884945560,
+ dust_limit_satoshis: 3608586615801332854,
+ max_htlc_value_in_flight_msat: 8517154655701053848,
+ channel_reserve_satoshis: 8665828695742877976,
+ htlc_minimum_msat: 2316138423780173,
+ feerate_per_kw: 821716,
+ to_self_delay: 49340,
+ max_accepted_htlcs: 49340,
+ funding_pubkey: pubkey_1,
+ revocation_basepoint: pubkey_2,
+ payment_basepoint: pubkey_3,
+ delayed_payment_basepoint: pubkey_4,
+ htlc_basepoint: pubkey_5,
+ first_per_commitment_point: pubkey_6,
+ channel_flags: if random_bit { 1 << 5 } else { 0 },
+ shutdown_scriptpubkey: if shutdown { OptionalField::Present(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: pubkey_1}, Network::Testnet).script_pubkey()) } else { OptionalField::Absent }
+ };
+ let encoded_value = open_channel.encode();
+ let mut target_value = Vec::new();
+ if non_bitcoin_chain_hash {
+ target_value.append(&mut hex::decode("43497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea330900000000").unwrap());
+ } else {
+ target_value.append(&mut hex::decode("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f").unwrap());
+ }
+ target_value.append(&mut hex::decode("02020202020202020202020202020202020202020202020202020202020202021234567890123456233403289122369832144668701144767633030896203198784335490624111800083a840000034d000c89d4c0bcc0bc031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f024d4b6cd1361032ca9bd2aeb9d900aa4d45d9ead80ac9423374c451a7254d076602531fe6068134503d2723133227c867ac8fa6c83c537e9a44c3c5bdbdcb1fe33703462779ad4aad39514614751a71085f2f10e1c7a593e4e030efb5b8721ce55b0b0362c0a046dacce86ddd0343c6d3c7c79c2208ba0d9c9cf24a6d046d21d21f90f703f006a18d5653c4edf5391ff23a61f03ff83d237e880ee61187fa9f379a028e0a").unwrap());
+ if random_bit {
+ target_value.append(&mut hex::decode("20").unwrap());
+ } else {
+ target_value.append(&mut hex::decode("00").unwrap());
+ }
+ if shutdown {
+ target_value.append(&mut hex::decode("001976a91479b000887626b294a914501a4cd226b58b23598388ac").unwrap());
+ }
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_open_channel() {
+ do_encoding_open_channel(false, false, false);
+ do_encoding_open_channel(true, false, false);
+ do_encoding_open_channel(false, true, false);
+ do_encoding_open_channel(false, false, true);
+ do_encoding_open_channel(true, true, true);
+ }
+
+ fn do_encoding_accept_channel(shutdown: bool) {
+ let secp_ctx = Secp256k1::new();
+ let (_, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let (_, pubkey_2) = get_keys_from!("0202020202020202020202020202020202020202020202020202020202020202", secp_ctx);
+ let (_, pubkey_3) = get_keys_from!("0303030303030303030303030303030303030303030303030303030303030303", secp_ctx);
+ let (_, pubkey_4) = get_keys_from!("0404040404040404040404040404040404040404040404040404040404040404", secp_ctx);
+ let (_, pubkey_5) = get_keys_from!("0505050505050505050505050505050505050505050505050505050505050505", secp_ctx);
+ let (_, pubkey_6) = get_keys_from!("0606060606060606060606060606060606060606060606060606060606060606", secp_ctx);
+ let accept_channel = msgs::AcceptChannel {
+ temporary_channel_id: [2; 32],
+ dust_limit_satoshis: 1311768467284833366,
+ max_htlc_value_in_flight_msat: 2536655962884945560,
+ channel_reserve_satoshis: 3608586615801332854,
+ htlc_minimum_msat: 2316138423780173,
+ minimum_depth: 821716,
+ to_self_delay: 49340,
+ max_accepted_htlcs: 49340,
+ funding_pubkey: pubkey_1,
+ revocation_basepoint: pubkey_2,
+ payment_basepoint: pubkey_3,
+ delayed_payment_basepoint: pubkey_4,
+ htlc_basepoint: pubkey_5,
+ first_per_commitment_point: pubkey_6,
+ shutdown_scriptpubkey: if shutdown { OptionalField::Present(Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: pubkey_1}, Network::Testnet).script_pubkey()) } else { OptionalField::Absent }
+ };
+ let encoded_value = accept_channel.encode();
+ let mut target_value = hex::decode("020202020202020202020202020202020202020202020202020202020202020212345678901234562334032891223698321446687011447600083a840000034d000c89d4c0bcc0bc031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f024d4b6cd1361032ca9bd2aeb9d900aa4d45d9ead80ac9423374c451a7254d076602531fe6068134503d2723133227c867ac8fa6c83c537e9a44c3c5bdbdcb1fe33703462779ad4aad39514614751a71085f2f10e1c7a593e4e030efb5b8721ce55b0b0362c0a046dacce86ddd0343c6d3c7c79c2208ba0d9c9cf24a6d046d21d21f90f703f006a18d5653c4edf5391ff23a61f03ff83d237e880ee61187fa9f379a028e0a").unwrap();
+ if shutdown {
+ target_value.append(&mut hex::decode("001976a91479b000887626b294a914501a4cd226b58b23598388ac").unwrap());
+ }
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_accept_channel() {
+ do_encoding_accept_channel(false);
+ do_encoding_accept_channel(true);
+ }
+
+ #[test]
+ fn encoding_funding_created() {
+ let secp_ctx = Secp256k1::new();
+ let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
+ let funding_created = msgs::FundingCreated {
+ temporary_channel_id: [2; 32],
+ funding_txid: Sha256dHash::from_hex("c2d4449afa8d26140898dd54d3390b057ba2a5afcf03ba29d7dc0d8b9ffe966e").unwrap(),
+ funding_output_index: 255,
+ signature: sig_1,
+ };
+ let encoded_value = funding_created.encode();
+ let target_value = hex::decode("02020202020202020202020202020202020202020202020202020202020202026e96fe9f8b0ddcd729ba03cfafa5a27b050b39d354dd980814268dfa9a44d4c200ffd977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap();
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_funding_signed() {
+ let secp_ctx = Secp256k1::new();
+ let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
+ let funding_signed = msgs::FundingSigned {
+ channel_id: [2; 32],
+ signature: sig_1,
+ };
+ let encoded_value = funding_signed.encode();
+ let target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap();
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_funding_locked() {
+ let secp_ctx = Secp256k1::new();
+ let (_, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let funding_locked = msgs::FundingLocked {
+ channel_id: [2; 32],
+ next_per_commitment_point: pubkey_1,
+ };
+ let encoded_value = funding_locked.encode();
+ let target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f").unwrap();
+ assert_eq!(encoded_value, target_value);
+ }
+
+ fn do_encoding_shutdown(script_type: u8) {
+ let secp_ctx = Secp256k1::new();
+ let (_, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let script = Builder::new().push_opcode(opcodes::OP_TRUE).into_script();
+ let shutdown = msgs::Shutdown {
+ channel_id: [2; 32],
+ scriptpubkey: if script_type == 1 { Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: pubkey_1}, Network::Testnet).script_pubkey() } else if script_type == 2 { Address::p2sh(&script, Network::Testnet).script_pubkey() } else if script_type == 3 { Address::p2wpkh(&::bitcoin::PublicKey{compressed: true, key: pubkey_1}, Network::Testnet).script_pubkey() } else { Address::p2wsh(&script, Network::Testnet).script_pubkey() },
+ };
+ let encoded_value = shutdown.encode();
+ let mut target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap();
+ if script_type == 1 {
+ target_value.append(&mut hex::decode("001976a91479b000887626b294a914501a4cd226b58b23598388ac").unwrap());
+ } else if script_type == 2 {
+ target_value.append(&mut hex::decode("0017a914da1745e9b549bd0bfa1a569971c77eba30cd5a4b87").unwrap());
+ } else if script_type == 3 {
+ target_value.append(&mut hex::decode("0016001479b000887626b294a914501a4cd226b58b235983").unwrap());
+ } else if script_type == 4 {
+ target_value.append(&mut hex::decode("002200204ae81572f06e1b88fd5ced7a1a000945432e83e1551e6f721ee9c00b8cc33260").unwrap());
+ }
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_shutdown() {
+ do_encoding_shutdown(1);
+ do_encoding_shutdown(2);
+ do_encoding_shutdown(3);
+ do_encoding_shutdown(4);
+ }
+
+ #[test]
+ fn encoding_closing_signed() {
+ let secp_ctx = Secp256k1::new();
+ let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
+ let closing_signed = msgs::ClosingSigned {
+ channel_id: [2; 32],
+ fee_satoshis: 2316138423780173,
+ signature: sig_1,
+ };
+ let encoded_value = closing_signed.encode();
+ let target_value = hex::decode("020202020202020202020202020202020202020202020202020202020202020200083a840000034dd977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap();
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_update_add_htlc() {
+ let secp_ctx = Secp256k1::new();
+ let (_, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let onion_routing_packet = msgs::OnionPacket {
+ version: 255,
+ public_key: Ok(pubkey_1),
+ hop_data: [1; 20*65],
+ hmac: [2; 32]
+ };
+ let update_add_htlc = msgs::UpdateAddHTLC {
+ channel_id: [2; 32],
+ htlc_id: 2316138423780173,
+ amount_msat: 3608586615801332854,
+ payment_hash: PaymentHash([1; 32]),
+ cltv_expiry: 821716,
+ onion_routing_packet
+ };
+ let encoded_value = update_add_htlc.encode();
+ let target_value = hex::decode("020202020202020202020202020202020202020202020202020202020202020200083a840000034d32144668701144760101010101010101010101010101010101010101010101010101010101010101000c89d4ff031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010202020202020202020202020202020202020202020202020202020202020202").unwrap();
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_update_fulfill_htlc() {
+ let update_fulfill_htlc = msgs::UpdateFulfillHTLC {
+ channel_id: [2; 32],
+ htlc_id: 2316138423780173,
+ payment_preimage: PaymentPreimage([1; 32]),
+ };
+ let encoded_value = update_fulfill_htlc.encode();
+ let target_value = hex::decode("020202020202020202020202020202020202020202020202020202020202020200083a840000034d0101010101010101010101010101010101010101010101010101010101010101").unwrap();
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_update_fail_htlc() {
+ let reason = OnionErrorPacket {
+ data: [1; 32].to_vec(),
+ };
+ let update_fail_htlc = msgs::UpdateFailHTLC {
+ channel_id: [2; 32],
+ htlc_id: 2316138423780173,
+ reason
+ };
+ let encoded_value = update_fail_htlc.encode();
+ let target_value = hex::decode("020202020202020202020202020202020202020202020202020202020202020200083a840000034d00200101010101010101010101010101010101010101010101010101010101010101").unwrap();
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_update_fail_malformed_htlc() {
+ let update_fail_malformed_htlc = msgs::UpdateFailMalformedHTLC {
+ channel_id: [2; 32],
+ htlc_id: 2316138423780173,
+ sha256_of_onion: [1; 32],
+ failure_code: 255
+ };
+ let encoded_value = update_fail_malformed_htlc.encode();
+ let target_value = hex::decode("020202020202020202020202020202020202020202020202020202020202020200083a840000034d010101010101010101010101010101010101010101010101010101010101010100ff").unwrap();
+ assert_eq!(encoded_value, target_value);
+ }
+
+ fn do_encoding_commitment_signed(htlcs: bool) {
+ let secp_ctx = Secp256k1::new();
+ let (privkey_1, _) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let (privkey_2, _) = get_keys_from!("0202020202020202020202020202020202020202020202020202020202020202", secp_ctx);
+ let (privkey_3, _) = get_keys_from!("0303030303030303030303030303030303030303030303030303030303030303", secp_ctx);
+ let (privkey_4, _) = get_keys_from!("0404040404040404040404040404040404040404040404040404040404040404", secp_ctx);
+ let sig_1 = get_sig_on!(privkey_1, secp_ctx, String::from("01010101010101010101010101010101"));
+ let sig_2 = get_sig_on!(privkey_2, secp_ctx, String::from("01010101010101010101010101010101"));
+ let sig_3 = get_sig_on!(privkey_3, secp_ctx, String::from("01010101010101010101010101010101"));
+ let sig_4 = get_sig_on!(privkey_4, secp_ctx, String::from("01010101010101010101010101010101"));
+ let commitment_signed = msgs::CommitmentSigned {
+ channel_id: [2; 32],
+ signature: sig_1,
+ htlc_signatures: if htlcs { vec![sig_2, sig_3, sig_4] } else { Vec::new() },
+ };
+ let encoded_value = commitment_signed.encode();
+ let mut target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202d977cb9b53d93a6ff64bb5f1e158b4094b66e798fb12911168a3ccdf80a83096340a6a95da0ae8d9f776528eecdbb747eb6b545495a4319ed5378e35b21e073a").unwrap();
+ if htlcs {
+ target_value.append(&mut hex::decode("00031735b6a427e80d5fe7cd90a2f4ee08dc9c27cda7c35a4172e5d85b12c49d4232537e98f9b1f3c5e6989a8b9644e90e8918127680dbd0d4043510840fc0f1e11a216c280b5395a2546e7e4b2663e04f811622f15a4f91e83aa2e92ba2a573c139142c54ae63072a1ec1ee7dc0c04bde5c847806172aa05c92c22ae8e308d1d2692b12cc195ce0a2d1bda6a88befa19fa07f51caa75ce83837f28965600b8aacab0855ffb0e741ec5f7c41421e9829a9d48611c8c831f71be5ea73e66594977ffd").unwrap());
+ } else {
+ target_value.append(&mut hex::decode("0000").unwrap());
+ }
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_commitment_signed() {
+ do_encoding_commitment_signed(true);
+ do_encoding_commitment_signed(false);
+ }
+
+ #[test]
+ fn encoding_revoke_and_ack() {
+ let secp_ctx = Secp256k1::new();
+ let (_, pubkey_1) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+ let raa = msgs::RevokeAndACK {
+ channel_id: [2; 32],
+ per_commitment_secret: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
+ next_per_commitment_point: pubkey_1,
+ };
+ let encoded_value = raa.encode();
+ let target_value = hex::decode("02020202020202020202020202020202020202020202020202020202020202020101010101010101010101010101010101010101010101010101010101010101031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f").unwrap();
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_update_fee() {
+ let update_fee = msgs::UpdateFee {
+ channel_id: [2; 32],
+ feerate_per_kw: 20190119,
+ };
+ let encoded_value = update_fee.encode();
+ let target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202013413a7").unwrap();
+ assert_eq!(encoded_value, target_value);
+ }
+
+ fn do_encoding_init(unknown_global_bits: bool, initial_routing_sync: bool) {
+ let mut global = GlobalFeatures::new();
+ if unknown_global_bits {
+ global.flags = vec![0xFF, 0xFF];
+ }
+ let mut local = LocalFeatures::new();
+ if initial_routing_sync {
+ local.set_initial_routing_sync();
+ }
+ let init = msgs::Init {
+ global_features: global,
+ local_features: local,
+ };
+ let encoded_value = init.encode();
+ let mut target_value = Vec::new();
+ if unknown_global_bits {
+ target_value.append(&mut hex::decode("0002ffff").unwrap());
+ } else {
+ target_value.append(&mut hex::decode("0000").unwrap());
+ }
+ if initial_routing_sync {
+ target_value.append(&mut hex::decode("00012a").unwrap());
+ } else {
+ target_value.append(&mut hex::decode("000122").unwrap());
+ }
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_init() {
+ do_encoding_init(false, false);
+ do_encoding_init(true, false);
+ do_encoding_init(false, true);
+ do_encoding_init(true, true);
+ }
+
+ #[test]
+ fn encoding_error() {
+ let error = msgs::ErrorMessage {
+ channel_id: [2; 32],
+ data: String::from("rust-lightning"),
+ };
+ let encoded_value = error.encode();
+ let target_value = hex::decode("0202020202020202020202020202020202020202020202020202020202020202000e727573742d6c696768746e696e67").unwrap();
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_ping() {
+ let ping = msgs::Ping {
+ ponglen: 64,
+ byteslen: 64
+ };
+ let encoded_value = ping.encode();
+ let target_value = hex::decode("0040004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap();
+ assert_eq!(encoded_value, target_value);
+ }
+
+ #[test]
+ fn encoding_pong() {
+ let pong = msgs::Pong {
+ byteslen: 64
+ };
+ let encoded_value = pong.encode();
+ let target_value = hex::decode("004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap();
+ assert_eq!(encoded_value, target_value);
+ }
+}
--- /dev/null
- use ln::msgs::HandleError;
++use ln::msgs::LightningError;
+use ln::msgs;
+
+use bitcoin_hashes::{Hash, HashEngine, Hmac, HmacEngine};
+use bitcoin_hashes::sha256::Hash as Sha256;
+
+use secp256k1::Secp256k1;
+use secp256k1::key::{PublicKey,SecretKey};
+use secp256k1::ecdh::SharedSecret;
+use secp256k1;
+
+use util::chacha20poly1305rfc::ChaCha20Poly1305RFC;
+use util::byte_utils;
+
+// Sha256("Noise_XK_secp256k1_ChaChaPoly_SHA256")
+const NOISE_CK: [u8; 32] = [0x26, 0x40, 0xf5, 0x2e, 0xeb, 0xcd, 0x9e, 0x88, 0x29, 0x58, 0x95, 0x1c, 0x79, 0x42, 0x50, 0xee, 0xdb, 0x28, 0x00, 0x2c, 0x05, 0xd7, 0xdc, 0x2e, 0xa0, 0xf1, 0x95, 0x40, 0x60, 0x42, 0xca, 0xf1];
+// Sha256(NOISE_CK || "lightning")
+const NOISE_H: [u8; 32] = [0xd1, 0xfb, 0xf6, 0xde, 0xe4, 0xf6, 0x86, 0xf1, 0x32, 0xfd, 0x70, 0x2c, 0x4a, 0xbf, 0x8f, 0xba, 0x4b, 0xb4, 0x20, 0xd8, 0x9d, 0x2a, 0x04, 0x8a, 0x3c, 0x4f, 0x4c, 0x09, 0x2e, 0x37, 0xb6, 0x76];
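+
+// Editor's sketch, not part of the original patch: recompute the two BOLT 8
+// constants above from their protocol-name strings, using the bitcoin_hashes
+// imports at the top of this file.
+#[test]
+fn noise_constants_match_their_derivation_sketch() {
+ let ck = Sha256::hash(&b"Noise_XK_secp256k1_ChaChaPoly_SHA256"[..]).into_inner();
+ assert_eq!(ck, NOISE_CK);
+ let mut sha = Sha256::engine();
+ sha.input(&ck);
+ sha.input(&b"lightning"[..]);
+ assert_eq!(Sha256::from_engine(sha).into_inner(), NOISE_H);
+}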
+
+pub enum NextNoiseStep {
+ ActOne,
+ ActTwo,
+ ActThree,
+ NoiseComplete,
+}
+
+#[derive(PartialEq)]
+enum NoiseStep {
+ PreActOne,
+ PostActOne,
+ PostActTwo,
+ // Once the handshake is done, swap noise_state for NoiseState::Finished
+}
+
+struct BidirectionalNoiseState {
+ h: [u8; 32],
+ ck: [u8; 32],
+}
+enum DirectionalNoiseState {
+ Outbound {
+ ie: SecretKey,
+ },
+ Inbound {
+ ie: Option<PublicKey>, // filled in if state >= PostActOne
+ re: Option<SecretKey>, // filled in if state >= PostActTwo
+ temp_k2: Option<[u8; 32]>, // filled in if state >= PostActTwo
+ }
+}
+enum NoiseState {
+ InProgress {
+ state: NoiseStep,
+ directional_state: DirectionalNoiseState,
+ bidirectional_state: BidirectionalNoiseState,
+ },
+ Finished {
+ sk: [u8; 32], // sending key
+ sn: u64, // sending nonce counter
+ sck: [u8; 32], // sending chaining key, used to rotate sk
+ rk: [u8; 32], // receiving key
+ rn: u64, // receiving nonce counter
+ rck: [u8; 32], // receiving chaining key, used to rotate rk
+ }
+}
+
+pub struct PeerChannelEncryptor {
+ secp_ctx: Secp256k1<secp256k1::SignOnly>,
+ their_node_id: Option<PublicKey>, // filled in for outbound, or inbound after noise_state is Finished
+
+ noise_state: NoiseState,
+}
+
+impl PeerChannelEncryptor {
+ pub fn new_outbound(their_node_id: PublicKey, ephemeral_key: SecretKey) -> PeerChannelEncryptor {
+ let secp_ctx = Secp256k1::signing_only();
+
+ let mut sha = Sha256::engine();
+ sha.input(&NOISE_H);
+ sha.input(&their_node_id.serialize()[..]);
+ let h = Sha256::from_engine(sha).into_inner();
+
+ PeerChannelEncryptor {
+ their_node_id: Some(their_node_id),
+ secp_ctx: secp_ctx,
+ noise_state: NoiseState::InProgress {
+ state: NoiseStep::PreActOne,
+ directional_state: DirectionalNoiseState::Outbound {
+ ie: ephemeral_key,
+ },
+ bidirectional_state: BidirectionalNoiseState {
+ h: h,
+ ck: NOISE_CK,
+ },
+ }
+ }
+ }
+
+ pub fn new_inbound(our_node_secret: &SecretKey) -> PeerChannelEncryptor {
+ let secp_ctx = Secp256k1::signing_only();
+
+ let mut sha = Sha256::engine();
+ sha.input(&NOISE_H);
+ let our_node_id = PublicKey::from_secret_key(&secp_ctx, our_node_secret);
+ sha.input(&our_node_id.serialize()[..]);
+ let h = Sha256::from_engine(sha).into_inner();
+
+ PeerChannelEncryptor {
+ their_node_id: None,
+ secp_ctx: secp_ctx,
+ noise_state: NoiseState::InProgress {
+ state: NoiseStep::PreActOne,
+ directional_state: DirectionalNoiseState::Inbound {
+ ie: None,
+ re: None,
+ temp_k2: None,
+ },
+ bidirectional_state: BidirectionalNoiseState {
+ h: h,
+ ck: NOISE_CK,
+ },
+ }
+ }
+ }
+
+ #[inline]
+ fn encrypt_with_ad(res: &mut[u8], n: u64, key: &[u8; 32], h: &[u8], plaintext: &[u8]) {
+ // BOLT 8 nonce: 4 zero bytes followed by the 64-bit little-endian counter.
+ let mut nonce = [0; 12];
+ nonce[4..].copy_from_slice(&byte_utils::le64_to_array(n));
+
+ let mut chacha = ChaCha20Poly1305RFC::new(key, &nonce, h);
+ let mut tag = [0; 16];
+ chacha.encrypt(plaintext, &mut res[0..plaintext.len()], &mut tag);
+ res[plaintext.len()..].copy_from_slice(&tag);
+ }
+
+ #[inline]
- fn decrypt_with_ad(res: &mut[u8], n: u64, key: &[u8; 32], h: &[u8], cyphertext: &[u8]) -> Result<(), HandleError> {
++ fn decrypt_with_ad(res: &mut[u8], n: u64, key: &[u8; 32], h: &[u8], cyphertext: &[u8]) -> Result<(), LightningError> {
+ let mut nonce = [0; 12];
+ nonce[4..].copy_from_slice(&byte_utils::le64_to_array(n));
+
+ let mut chacha = ChaCha20Poly1305RFC::new(key, &nonce, h);
+ if !chacha.decrypt(&cyphertext[0..cyphertext.len() - 16], res, &cyphertext[cyphertext.len() - 16..]) {
- return Err(HandleError{err: "Bad MAC", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })});
++ return Err(LightningError{err: "Bad MAC", action: msgs::ErrorAction::DisconnectPeer{ msg: None }});
+ }
+ Ok(())
+ }
+
+ // HKDF (RFC 5869) with SHA-256 and empty `info`, returning the first two
+ // 32-byte output blocks T(1) and T(2).
+ fn hkdf_extract_expand(salt: &[u8], ikm: &[u8]) -> ([u8; 32], [u8; 32]) {
+ let mut hmac = HmacEngine::<Sha256>::new(salt);
+ hmac.input(ikm);
+ let prk = Hmac::from_engine(hmac).into_inner();
+ let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
+ hmac.input(&[1; 1]);
+ let t1 = Hmac::from_engine(hmac).into_inner();
+ let mut hmac = HmacEngine::<Sha256>::new(&prk[..]);
+ hmac.input(&t1);
+ hmac.input(&[2; 1]);
+ (t1, Hmac::from_engine(hmac).into_inner())
+ }
+
+ #[inline]
+ fn hkdf(state: &mut BidirectionalNoiseState, ss: SharedSecret) -> [u8; 32] {
+ let (t1, t2) = Self::hkdf_extract_expand(&state.ck, &ss[..]);
+ state.ck = t1;
+ t2
+ }
+
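+ // A single act on the wire is 50 bytes: one version byte, a 33-byte
+ // compressed ephemeral public key, and a 16-byte Poly1305 tag.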
+ #[inline]
+ fn outbound_noise_act<T: secp256k1::Signing>(secp_ctx: &Secp256k1<T>, state: &mut BidirectionalNoiseState, our_key: &SecretKey, their_key: &PublicKey) -> ([u8; 50], [u8; 32]) {
+ let our_pub = PublicKey::from_secret_key(secp_ctx, &our_key);
+
+ let mut sha = Sha256::engine();
+ sha.input(&state.h);
+ sha.input(&our_pub.serialize()[..]);
+ state.h = Sha256::from_engine(sha).into_inner();
+
+ let ss = SharedSecret::new(&their_key, &our_key);
+ let temp_k = PeerChannelEncryptor::hkdf(state, ss);
+
+ let mut res = [0; 50];
+ res[1..34].copy_from_slice(&our_pub.serialize()[..]);
+ PeerChannelEncryptor::encrypt_with_ad(&mut res[34..], 0, &temp_k, &state.h, &[0; 0]);
+
+ let mut sha = Sha256::engine();
+ sha.input(&state.h);
+ sha.input(&res[34..]);
+ state.h = Sha256::from_engine(sha).into_inner();
+
+ (res, temp_k)
+ }
+
+ #[inline]
- fn inbound_noise_act(state: &mut BidirectionalNoiseState, act: &[u8], our_key: &SecretKey) -> Result<(PublicKey, [u8; 32]), HandleError> {
++ fn inbound_noise_act(state: &mut BidirectionalNoiseState, act: &[u8], our_key: &SecretKey) -> Result<(PublicKey, [u8; 32]), LightningError> {
+ assert_eq!(act.len(), 50);
+
+ if act[0] != 0 {
- return Err(HandleError{err: "Unknown handshake version number", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })});
++ return Err(LightningError{err: "Unknown handshake version number", action: msgs::ErrorAction::DisconnectPeer{ msg: None }});
+ }
+
+ let their_pub = match PublicKey::from_slice(&act[1..34]) {
- Err(_) => return Err(HandleError{err: "Invalid public key", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}),
++ Err(_) => return Err(LightningError{err: "Invalid public key", action: msgs::ErrorAction::DisconnectPeer{ msg: None }}),
+ Ok(key) => key,
+ };
+
+ let mut sha = Sha256::engine();
+ sha.input(&state.h);
+ sha.input(&their_pub.serialize()[..]);
+ state.h = Sha256::from_engine(sha).into_inner();
+
+ let ss = SharedSecret::new(&their_pub, &our_key);
+ let temp_k = PeerChannelEncryptor::hkdf(state, ss);
+
+ let mut dec = [0; 0];
+ PeerChannelEncryptor::decrypt_with_ad(&mut dec, 0, &temp_k, &state.h, &act[34..])?;
+
+ let mut sha = Sha256::engine();
+ sha.input(&state.h);
+ sha.input(&act[34..]);
+ state.h = Sha256::from_engine(sha).into_inner();
+
+ Ok((their_pub, temp_k))
+ }
+
+ pub fn get_act_one(&mut self) -> [u8; 50] {
+ match self.noise_state {
+ NoiseState::InProgress { ref mut state, ref directional_state, ref mut bidirectional_state } =>
+ match directional_state {
+ &DirectionalNoiseState::Outbound { ref ie } => {
+ if *state != NoiseStep::PreActOne {
+ panic!("Requested act at wrong step");
+ }
+
+ let (res, _) = PeerChannelEncryptor::outbound_noise_act(&self.secp_ctx, bidirectional_state, &ie, &self.their_node_id.unwrap());
+ *state = NoiseStep::PostActOne;
+ res
+ },
+ _ => panic!("Wrong direction for act"),
+ },
+ _ => panic!("Cannot get act one after noise handshake completes"),
+ }
+ }
+
- pub fn process_act_one_with_keys(&mut self, act_one: &[u8], our_node_secret: &SecretKey, our_ephemeral: SecretKey) -> Result<[u8; 50], HandleError> {
++ pub fn process_act_one_with_keys(&mut self, act_one: &[u8], our_node_secret: &SecretKey, our_ephemeral: SecretKey) -> Result<[u8; 50], LightningError> {
+ assert_eq!(act_one.len(), 50);
+
+ match self.noise_state {
+ NoiseState::InProgress { ref mut state, ref mut directional_state, ref mut bidirectional_state } =>
+ match directional_state {
+ &mut DirectionalNoiseState::Inbound { ref mut ie, ref mut re, ref mut temp_k2 } => {
+ if *state != NoiseStep::PreActOne {
+ panic!("Requested act at wrong step");
+ }
+
+ let (their_pub, _) = PeerChannelEncryptor::inbound_noise_act(bidirectional_state, act_one, &our_node_secret)?;
+ ie.get_or_insert(their_pub);
+
+ re.get_or_insert(our_ephemeral);
+
+ let (res, temp_k) = PeerChannelEncryptor::outbound_noise_act(&self.secp_ctx, bidirectional_state, &re.unwrap(), &ie.unwrap());
+ *temp_k2 = Some(temp_k);
+ *state = NoiseStep::PostActTwo;
+ Ok(res)
+ },
+ _ => panic!("Wrong direction for act"),
+ },
+ _ => panic!("Cannot get act one after noise handshake completes"),
+ }
+ }
+
- pub fn process_act_two(&mut self, act_two: &[u8], our_node_secret: &SecretKey) -> Result<([u8; 66], PublicKey), HandleError> {
++ pub fn process_act_two(&mut self, act_two: &[u8], our_node_secret: &SecretKey) -> Result<([u8; 66], PublicKey), LightningError> {
+ assert_eq!(act_two.len(), 50);
+
+ let final_hkdf;
+ let ck;
+ let res: [u8; 66] = match self.noise_state {
+ NoiseState::InProgress { ref state, ref directional_state, ref mut bidirectional_state } =>
+ match directional_state {
+ &DirectionalNoiseState::Outbound { ref ie } => {
+ if *state != NoiseStep::PostActOne {
+ panic!("Requested act at wrong step");
+ }
+
+ let (re, temp_k2) = PeerChannelEncryptor::inbound_noise_act(bidirectional_state, act_two, &ie)?;
+
+ let mut res = [0; 66];
+ let our_node_id = PublicKey::from_secret_key(&self.secp_ctx, &our_node_secret);
+
+ PeerChannelEncryptor::encrypt_with_ad(&mut res[1..50], 1, &temp_k2, &bidirectional_state.h, &our_node_id.serialize()[..]);
+
+ let mut sha = Sha256::engine();
+ sha.input(&bidirectional_state.h);
+ sha.input(&res[1..50]);
+ bidirectional_state.h = Sha256::from_engine(sha).into_inner();
+
+ let ss = SharedSecret::new(&re, our_node_secret);
+ let temp_k = PeerChannelEncryptor::hkdf(bidirectional_state, ss);
+
+ PeerChannelEncryptor::encrypt_with_ad(&mut res[50..], 0, &temp_k, &bidirectional_state.h, &[0; 0]);
+ final_hkdf = Self::hkdf_extract_expand(&bidirectional_state.ck, &[0; 0]);
+ ck = bidirectional_state.ck.clone();
+ res
+ },
+ _ => panic!("Wrong direction for act"),
+ },
+ _ => panic!("Cannot get act one after noise handshake completes"),
+ };
+
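+ // The initiator sends with T(1) and receives with T(2); process_act_three
+ // below unpacks the same HKDF output in the opposite order.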
+ let (sk, rk) = final_hkdf;
+ self.noise_state = NoiseState::Finished {
+ sk: sk,
+ sn: 0,
+ sck: ck.clone(),
+ rk: rk,
+ rn: 0,
+ rck: ck,
+ };
+
+ Ok((res, self.their_node_id.unwrap().clone()))
+ }
+
- pub fn process_act_three(&mut self, act_three: &[u8]) -> Result<PublicKey, HandleError> {
++ pub fn process_act_three(&mut self, act_three: &[u8]) -> Result<PublicKey, LightningError> {
+ assert_eq!(act_three.len(), 66);
+
+ let final_hkdf;
+ let ck;
+ match self.noise_state {
+ NoiseState::InProgress { ref state, ref directional_state, ref mut bidirectional_state } =>
+ match directional_state {
+ &DirectionalNoiseState::Inbound { ie: _, ref re, ref temp_k2 } => {
+ if *state != NoiseStep::PostActTwo {
+ panic!("Requested act at wrong step");
+ }
+ if act_three[0] != 0 {
- return Err(HandleError{err: "Unknown handshake version number", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })});
++ return Err(LightningError{err: "Unknown handshake version number", action: msgs::ErrorAction::DisconnectPeer{ msg: None }});
+ }
+
+ let mut their_node_id = [0; 33];
+ PeerChannelEncryptor::decrypt_with_ad(&mut their_node_id, 1, &temp_k2.unwrap(), &bidirectional_state.h, &act_three[1..50])?;
+ self.their_node_id = Some(match PublicKey::from_slice(&their_node_id) {
+ Ok(key) => key,
- Err(_) => return Err(HandleError{err: "Bad node_id from peer", action: Some(msgs::ErrorAction::DisconnectPeer{ msg: None })}),
++ Err(_) => return Err(LightningError{err: "Bad node_id from peer", action: msgs::ErrorAction::DisconnectPeer{ msg: None }}),
+ });
+
+ let mut sha = Sha256::engine();
+ sha.input(&bidirectional_state.h);
+ sha.input(&act_three[1..50]);
+ bidirectional_state.h = Sha256::from_engine(sha).into_inner();
+
+ let ss = SharedSecret::new(&self.their_node_id.unwrap(), &re.unwrap());
+ let temp_k = PeerChannelEncryptor::hkdf(bidirectional_state, ss);
+
+ PeerChannelEncryptor::decrypt_with_ad(&mut [0; 0], 0, &temp_k, &bidirectional_state.h, &act_three[50..])?;
+ final_hkdf = Self::hkdf_extract_expand(&bidirectional_state.ck, &[0; 0]);
+ ck = bidirectional_state.ck.clone();
+ },
+ _ => panic!("Wrong direction for act"),
+ },
+ _ => panic!("Cannot get act one after noise handshake completes"),
+ }
+
+ let (rk, sk) = final_hkdf;
+ self.noise_state = NoiseState::Finished {
+ sk: sk,
+ sn: 0,
+ sck: ck.clone(),
+ rk: rk,
+ rn: 0,
+ rck: ck,
+ };
+
+ Ok(self.their_node_id.unwrap().clone())
+ }
+
+ /// Encrypts the given message, returning the encrypted version.
+ /// Panics if msg.len() > 65535 or if the noise handshake has not yet finished.
+ pub fn encrypt_message(&mut self, msg: &[u8]) -> Vec<u8> {
+ if msg.len() > 65535 {
+ panic!("Attempted to encrypt message longer than 65535 bytes!");
+ }
+
+ let mut res = Vec::with_capacity(msg.len() + 16*2 + 2);
+ res.resize(msg.len() + 16*2 + 2, 0);
+
+ match self.noise_state {
+ NoiseState::Finished { ref mut sk, ref mut sn, ref mut sck, rk: _, rn: _, rck: _ } => {
+ if *sn >= 1000 {
+ let (new_sck, new_sk) = Self::hkdf_extract_expand(sck, sk);
+ *sck = new_sck;
+ *sk = new_sk;
+ *sn = 0;
+ }
+
+ Self::encrypt_with_ad(&mut res[0..16+2], *sn, sk, &[0; 0], &byte_utils::be16_to_array(msg.len() as u16));
+ *sn += 1;
+
+ Self::encrypt_with_ad(&mut res[16+2..], *sn, sk, &[0; 0], msg);
+ *sn += 1;
+ },
+ _ => panic!("Tried to encrypt a message prior to noise handshake completion"),
+ }
+
+ res
+ }
+
+ /// Decrypts a message length header from the remote peer.
+ /// Panics if the noise handshake has not yet finished or msg.len() != 18.
- pub fn decrypt_length_header(&mut self, msg: &[u8]) -> Result<u16, HandleError> {
++ pub fn decrypt_length_header(&mut self, msg: &[u8]) -> Result<u16, LightningError> {
+ assert_eq!(msg.len(), 16+2);
+
+ match self.noise_state {
+ NoiseState::Finished { sk: _, sn: _, sck: _, ref mut rk, ref mut rn, ref mut rck } => {
+ if *rn >= 1000 {
+ let (new_rck, new_rk) = Self::hkdf_extract_expand(rck, rk);
+ *rck = new_rck;
+ *rk = new_rk;
+ *rn = 0;
+ }
+
+ let mut res = [0; 2];
+ Self::decrypt_with_ad(&mut res, *rn, rk, &[0; 0], msg)?;
+ *rn += 1;
+ Ok(byte_utils::slice_to_be16(&res))
+ },
+ _ => panic!("Tried to encrypt a message prior to noise handshake completion"),
+ }
+ }
+
+ /// Decrypts the given message.
+ /// panics if msg.len() > 65535 + 16
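+ ///
+ /// A complete frame is consumed in two steps: the 18-byte header goes through
+ /// decrypt_length_header, then length + 16 bytes come here. An illustrative sketch (the
+ /// read_exact helper is hypothetical):
+ /// ```ignore
+ /// let len = peer.decrypt_length_header(&read_exact(18))? as usize;
+ /// let msg = peer.decrypt_message(&read_exact(len + 16))?;
+ /// ```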
- pub fn decrypt_message(&mut self, msg: &[u8]) -> Result<Vec<u8>, HandleError> {
++ pub fn decrypt_message(&mut self, msg: &[u8]) -> Result<Vec<u8>, LightningError> {
+ if msg.len() > 65535 + 16 {
+ panic!("Attempted to encrypt message longer than 65535 bytes!");
+ }
+
+ match self.noise_state {
+ NoiseState::Finished { sk: _, sn: _, sck: _, ref rk, ref mut rn, rck: _ } => {
+ let mut res = Vec::with_capacity(msg.len() - 16);
+ res.resize(msg.len() - 16, 0);
+ Self::decrypt_with_ad(&mut res[..], *rn, rk, &[0; 0], msg)?;
+ *rn += 1;
+
+ Ok(res)
+ },
+ _ => panic!("Tried to encrypt a message prior to noise handshake completion"),
+ }
+ }
+
+ pub fn get_noise_step(&self) -> NextNoiseStep {
+ match self.noise_state {
+ NoiseState::InProgress {ref state, ..} => {
+ match state {
+ &NoiseStep::PreActOne => NextNoiseStep::ActOne,
+ &NoiseStep::PostActOne => NextNoiseStep::ActTwo,
+ &NoiseStep::PostActTwo => NextNoiseStep::ActThree,
+ }
+ },
+ NoiseState::Finished {..} => NextNoiseStep::NoiseComplete,
+ }
+ }
+
+ pub fn is_ready_for_encryption(&self) -> bool {
+ match self.noise_state {
+ NoiseState::InProgress {..} => { false },
+ NoiseState::Finished {..} => { true }
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use secp256k1::key::{PublicKey,SecretKey};
+
+ use hex;
+
+ use ln::peer_channel_encryptor::{PeerChannelEncryptor,NoiseState};
+
+ fn get_outbound_peer_for_initiator_test_vectors() -> PeerChannelEncryptor {
+ let their_node_id = PublicKey::from_slice(&hex::decode("028d7500dd4c12685d1f568b4c2b5048e8534b873319f3a8daa612b469132ec7f7").unwrap()[..]).unwrap();
+
+ let mut outbound_peer = PeerChannelEncryptor::new_outbound(their_node_id, SecretKey::from_slice(&hex::decode("1212121212121212121212121212121212121212121212121212121212121212").unwrap()[..]).unwrap());
+ assert_eq!(outbound_peer.get_act_one()[..], hex::decode("00036360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df6086551151f58b8afe6c195782c6a").unwrap()[..]);
+ outbound_peer
+ }
+
+ #[test]
+ fn noise_initiator_test_vectors() {
+ let our_node_id = SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap();
+
+ {
+ // transport-initiator successful handshake
+ let mut outbound_peer = get_outbound_peer_for_initiator_test_vectors();
+
+ let act_two = hex::decode("0002466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac583c9ef6eafca3f730ae").unwrap().to_vec();
+ assert_eq!(outbound_peer.process_act_two(&act_two[..], &our_node_id).unwrap().0[..], hex::decode("00b9e3a702e93e3a9948c2ed6e5fd7590a6e1c3a0344cfc9d5b57357049aa22355361aa02e55a8fc28fef5bd6d71ad0c38228dc68b1c466263b47fdf31e560e139ba").unwrap()[..]);
+
+ match outbound_peer.noise_state {
+ NoiseState::Finished { sk, sn, sck, rk, rn, rck } => {
+ assert_eq!(sk, hex::decode("969ab31b4d288cedf6218839b27a3e2140827047f2c0f01bf5c04435d43511a9").unwrap()[..]);
+ assert_eq!(sn, 0);
+ assert_eq!(sck, hex::decode("919219dbb2920afa8db80f9a51787a840bcf111ed8d588caf9ab4be716e42b01").unwrap()[..]);
+ assert_eq!(rk, hex::decode("bb9020b8965f4df047e07f955f3c4b88418984aadc5cdb35096b9ea8fa5c3442").unwrap()[..]);
+ assert_eq!(rn, 0);
+ assert_eq!(rck, hex::decode("919219dbb2920afa8db80f9a51787a840bcf111ed8d588caf9ab4be716e42b01").unwrap()[..]);
+ },
+ _ => panic!()
+ }
+ }
+ {
+ // transport-initiator act2 short read test
+ // Can't actually test this because process_act_two requires that you pass the right length!
+ }
+ {
+ // transport-initiator act2 bad version test
+ let mut outbound_peer = get_outbound_peer_for_initiator_test_vectors();
+
+ let act_two = hex::decode("0102466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac583c9ef6eafca3f730ae").unwrap().to_vec();
+ assert!(outbound_peer.process_act_two(&act_two[..], &our_node_id).is_err());
+ }
+
+ {
+ // transport-initiator act2 bad key serialization test
+ let mut outbound_peer = get_outbound_peer_for_initiator_test_vectors();
+
+ let act_two = hex::decode("0004466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac583c9ef6eafca3f730ae").unwrap().to_vec();
+ assert!(outbound_peer.process_act_two(&act_two[..], &our_node_id).is_err());
+ }
+
+ {
+ // transport-initiator act2 bad MAC test
+ let mut outbound_peer = get_outbound_peer_for_initiator_test_vectors();
+
+ let act_two = hex::decode("0002466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac583c9ef6eafca3f730af").unwrap().to_vec();
+ assert!(outbound_peer.process_act_two(&act_two[..], &our_node_id).is_err());
+ }
+ }
+
+ #[test]
+ fn noise_responder_test_vectors() {
+ let our_node_id = SecretKey::from_slice(&hex::decode("2121212121212121212121212121212121212121212121212121212121212121").unwrap()[..]).unwrap();
+ let our_ephemeral = SecretKey::from_slice(&hex::decode("2222222222222222222222222222222222222222222222222222222222222222").unwrap()[..]).unwrap();
+
+ {
+ // transport-responder successful handshake
+ let mut inbound_peer = PeerChannelEncryptor::new_inbound(&our_node_id);
+
+ let act_one = hex::decode("00036360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df6086551151f58b8afe6c195782c6a").unwrap().to_vec();
+ assert_eq!(inbound_peer.process_act_one_with_keys(&act_one[..], &our_node_id, our_ephemeral.clone()).unwrap()[..], hex::decode("0002466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac583c9ef6eafca3f730ae").unwrap()[..]);
+
+ let act_three = hex::decode("00b9e3a702e93e3a9948c2ed6e5fd7590a6e1c3a0344cfc9d5b57357049aa22355361aa02e55a8fc28fef5bd6d71ad0c38228dc68b1c466263b47fdf31e560e139ba").unwrap().to_vec();
+ // test vector doesn't specify the initiator static key, but it's the same as the one
+ // from transport-initiator successful handshake
+ assert_eq!(inbound_peer.process_act_three(&act_three[..]).unwrap().serialize()[..], hex::decode("034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa").unwrap()[..]);
+
+ match inbound_peer.noise_state {
+ NoiseState::Finished { sk, sn, sck, rk, rn, rck } => {
+ assert_eq!(sk, hex::decode("bb9020b8965f4df047e07f955f3c4b88418984aadc5cdb35096b9ea8fa5c3442").unwrap()[..]);
+ assert_eq!(sn, 0);
+ assert_eq!(sck, hex::decode("919219dbb2920afa8db80f9a51787a840bcf111ed8d588caf9ab4be716e42b01").unwrap()[..]);
+ assert_eq!(rk, hex::decode("969ab31b4d288cedf6218839b27a3e2140827047f2c0f01bf5c04435d43511a9").unwrap()[..]);
+ assert_eq!(rn, 0);
+ assert_eq!(rck, hex::decode("919219dbb2920afa8db80f9a51787a840bcf111ed8d588caf9ab4be716e42b01").unwrap()[..]);
+ },
+ _ => panic!()
+ }
+ }
+ {
+ // transport-responder act1 short read test
+ // Can't actually test this because process_act_one requires that you pass the right length!
+ }
+ {
+ // transport-responder act1 bad version test
+ let mut inbound_peer = PeerChannelEncryptor::new_inbound(&our_node_id);
+
+ let act_one = hex::decode("01036360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df6086551151f58b8afe6c195782c6a").unwrap().to_vec();
+ assert!(inbound_peer.process_act_one_with_keys(&act_one[..], &our_node_id, our_ephemeral.clone()).is_err());
+ }
+ {
+ // transport-responder act1 bad key serialization test
+ let mut inbound_peer = PeerChannelEncryptor::new_inbound(&our_node_id);
+
+ let act_one = hex::decode("00046360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df6086551151f58b8afe6c195782c6a").unwrap().to_vec();
+ assert!(inbound_peer.process_act_one_with_keys(&act_one[..], &our_node_id, our_ephemeral.clone()).is_err());
+ }
+ {
+ // transport-responder act1 bad MAC test
+ let mut inbound_peer = PeerChannelEncryptor::new_inbound(&our_node_id);
+
+ let act_one = hex::decode("00036360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df6086551151f58b8afe6c195782c6b").unwrap().to_vec();
+ assert!(inbound_peer.process_act_one_with_keys(&act_one[..], &our_node_id, our_ephemeral.clone()).is_err());
+ }
+ {
+ // transport-responder act3 bad version test
+ let mut inbound_peer = PeerChannelEncryptor::new_inbound(&our_node_id);
+
+ let act_one = hex::decode("00036360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df6086551151f58b8afe6c195782c6a").unwrap().to_vec();
+ assert_eq!(inbound_peer.process_act_one_with_keys(&act_one[..], &our_node_id, our_ephemeral.clone()).unwrap()[..], hex::decode("0002466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac583c9ef6eafca3f730ae").unwrap()[..]);
+
+ let act_three = hex::decode("01b9e3a702e93e3a9948c2ed6e5fd7590a6e1c3a0344cfc9d5b57357049aa22355361aa02e55a8fc28fef5bd6d71ad0c38228dc68b1c466263b47fdf31e560e139ba").unwrap().to_vec();
+ assert!(inbound_peer.process_act_three(&act_three[..]).is_err());
+ }
+ {
+ // transport-responder act3 short read test
+ // Can't actually test this because process_act_three requires that you pass the right length!
+ }
+ {
+ // transport-responder act3 bad MAC for ciphertext test
+ let mut inbound_peer = PeerChannelEncryptor::new_inbound(&our_node_id);
+
+ let act_one = hex::decode("00036360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df6086551151f58b8afe6c195782c6a").unwrap().to_vec();
+ assert_eq!(inbound_peer.process_act_one_with_keys(&act_one[..], &our_node_id, our_ephemeral.clone()).unwrap()[..], hex::decode("0002466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac583c9ef6eafca3f730ae").unwrap()[..]);
+
+ let act_three = hex::decode("00c9e3a702e93e3a9948c2ed6e5fd7590a6e1c3a0344cfc9d5b57357049aa22355361aa02e55a8fc28fef5bd6d71ad0c38228dc68b1c466263b47fdf31e560e139ba").unwrap().to_vec();
+ assert!(inbound_peer.process_act_three(&act_three[..]).is_err());
+ }
+ {
+ // transport-responder act3 bad rs test
+ let mut inbound_peer = PeerChannelEncryptor::new_inbound(&our_node_id);
+
+ let act_one = hex::decode("00036360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df6086551151f58b8afe6c195782c6a").unwrap().to_vec();
+ assert_eq!(inbound_peer.process_act_one_with_keys(&act_one[..], &our_node_id, our_ephemeral.clone()).unwrap()[..], hex::decode("0002466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac583c9ef6eafca3f730ae").unwrap()[..]);
+
+ let act_three = hex::decode("00bfe3a702e93e3a9948c2ed6e5fd7590a6e1c3a0344cfc9d5b57357049aa2235536ad09a8ee351870c2bb7f78b754a26c6cef79a98d25139c856d7efd252c2ae73c").unwrap().to_vec();
+ assert!(inbound_peer.process_act_three(&act_three[..]).is_err());
+ }
+ {
+ // transport-responder act3 bad MAC test
+ let mut inbound_peer = PeerChannelEncryptor::new_inbound(&our_node_id);
+
+ let act_one = hex::decode("00036360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df6086551151f58b8afe6c195782c6a").unwrap().to_vec();
+ assert_eq!(inbound_peer.process_act_one_with_keys(&act_one[..], &our_node_id, our_ephemeral.clone()).unwrap()[..], hex::decode("0002466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac583c9ef6eafca3f730ae").unwrap()[..]);
+
+ let act_three = hex::decode("00b9e3a702e93e3a9948c2ed6e5fd7590a6e1c3a0344cfc9d5b57357049aa22355361aa02e55a8fc28fef5bd6d71ad0c38228dc68b1c466263b47fdf31e560e139bb").unwrap().to_vec();
+ assert!(inbound_peer.process_act_three(&act_three[..]).is_err());
+ }
+ }
+
+ #[test]
+ fn message_encryption_decryption_test_vectors() {
+ // We use the same keys as the initiator and responder test vectors, so we copy those tests
+ // here and use them to encrypt.
+ let mut outbound_peer = get_outbound_peer_for_initiator_test_vectors();
+
+ {
+ let our_node_id = SecretKey::from_slice(&hex::decode("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap();
+
+ let act_two = hex::decode("0002466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac583c9ef6eafca3f730ae").unwrap().to_vec();
+ assert_eq!(outbound_peer.process_act_two(&act_two[..], &our_node_id).unwrap().0[..], hex::decode("00b9e3a702e93e3a9948c2ed6e5fd7590a6e1c3a0344cfc9d5b57357049aa22355361aa02e55a8fc28fef5bd6d71ad0c38228dc68b1c466263b47fdf31e560e139ba").unwrap()[..]);
+
+ match outbound_peer.noise_state {
+ NoiseState::Finished { sk, sn, sck, rk, rn, rck } => {
+ assert_eq!(sk, hex::decode("969ab31b4d288cedf6218839b27a3e2140827047f2c0f01bf5c04435d43511a9").unwrap()[..]);
+ assert_eq!(sn, 0);
+ assert_eq!(sck, hex::decode("919219dbb2920afa8db80f9a51787a840bcf111ed8d588caf9ab4be716e42b01").unwrap()[..]);
+ assert_eq!(rk, hex::decode("bb9020b8965f4df047e07f955f3c4b88418984aadc5cdb35096b9ea8fa5c3442").unwrap()[..]);
+ assert_eq!(rn, 0);
+ assert_eq!(rck, hex::decode("919219dbb2920afa8db80f9a51787a840bcf111ed8d588caf9ab4be716e42b01").unwrap()[..]);
+ },
+ _ => panic!()
+ }
+ }
+
+ let mut inbound_peer;
+
+ {
+ // transport-responder successful handshake
+ let our_node_id = SecretKey::from_slice(&hex::decode("2121212121212121212121212121212121212121212121212121212121212121").unwrap()[..]).unwrap();
+ let our_ephemeral = SecretKey::from_slice(&hex::decode("2222222222222222222222222222222222222222222222222222222222222222").unwrap()[..]).unwrap();
+
+ inbound_peer = PeerChannelEncryptor::new_inbound(&our_node_id);
+
+ let act_one = hex::decode("00036360e856310ce5d294e8be33fc807077dc56ac80d95d9cd4ddbd21325eff73f70df6086551151f58b8afe6c195782c6a").unwrap().to_vec();
+ assert_eq!(inbound_peer.process_act_one_with_keys(&act_one[..], &our_node_id, our_ephemeral.clone()).unwrap()[..], hex::decode("0002466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f276e2470b93aac583c9ef6eafca3f730ae").unwrap()[..]);
+
+ let act_three = hex::decode("00b9e3a702e93e3a9948c2ed6e5fd7590a6e1c3a0344cfc9d5b57357049aa22355361aa02e55a8fc28fef5bd6d71ad0c38228dc68b1c466263b47fdf31e560e139ba").unwrap().to_vec();
+ // test vector doesn't specify the initiator static key, but it's the same as the one
+ // from transport-initiator successful handshake
+ assert_eq!(inbound_peer.process_act_three(&act_three[..]).unwrap().serialize()[..], hex::decode("034f355bdcb7cc0af728ef3cceb9615d90684bb5b2ca5f859ab0f0b704075871aa").unwrap()[..]);
+
+ match inbound_peer.noise_state {
+ NoiseState::Finished { sk, sn, sck, rk, rn, rck } => {
+ assert_eq!(sk, hex::decode("bb9020b8965f4df047e07f955f3c4b88418984aadc5cdb35096b9ea8fa5c3442").unwrap()[..]);
+ assert_eq!(sn, 0);
+ assert_eq!(sck, hex::decode("919219dbb2920afa8db80f9a51787a840bcf111ed8d588caf9ab4be716e42b01").unwrap()[..]);
+ assert_eq!(rk, hex::decode("969ab31b4d288cedf6218839b27a3e2140827047f2c0f01bf5c04435d43511a9").unwrap()[..]);
+ assert_eq!(rn, 0);
+ assert_eq!(rck, hex::decode("919219dbb2920afa8db80f9a51787a840bcf111ed8d588caf9ab4be716e42b01").unwrap()[..]);
+ },
+ _ => panic!()
+ }
+ }
+
+ for i in 0..1005 {
+ let msg = [0x68, 0x65, 0x6c, 0x6c, 0x6f];
+ let res = outbound_peer.encrypt_message(&msg);
+ assert_eq!(res.len(), 5 + 2*16 + 2);
+
+ let len_header = res[0..2+16].to_vec();
+ assert_eq!(inbound_peer.decrypt_length_header(&len_header[..]).unwrap() as usize, msg.len());
+ assert_eq!(inbound_peer.decrypt_message(&res[2+16..]).unwrap()[..], msg[..]);
+
+ if i == 0 {
+ assert_eq!(res, hex::decode("cf2b30ddf0cf3f80e7c35a6e6730b59fe802473180f396d88a8fb0db8cbcf25d2f214cf9ea1d95").unwrap());
+ } else if i == 1 {
+ assert_eq!(res, hex::decode("72887022101f0b6753e0c7de21657d35a4cb2a1f5cde2650528bbc8f837d0f0d7ad833b1a256a1").unwrap());
+ } else if i == 500 {
+ assert_eq!(res, hex::decode("178cb9d7387190fa34db9c2d50027d21793c9bc2d40b1e14dcf30ebeeeb220f48364f7a4c68bf8").unwrap());
+ } else if i == 501 {
+ assert_eq!(res, hex::decode("1b186c57d44eb6de4c057c49940d79bb838a145cb528d6e8fd26dbe50a60ca2c104b56b60e45bd").unwrap());
+ } else if i == 1000 {
+ assert_eq!(res, hex::decode("4a2f3cc3b5e78ddb83dcb426d9863d9d9a723b0337c89dd0b005d89f8d3c05c52b76b29b740f09").unwrap());
+ } else if i == 1001 {
+ assert_eq!(res, hex::decode("2ecd8c8a5629d0d02ab457a0fdd0f7b90a192cd46be5ecb6ca570bfc5e268338b1a16cf4ef2d36").unwrap());
+ }
+ }
+ }
+}
--- /dev/null
- if let Some(action) = e.action {
- match action {
- msgs::ErrorAction::DisconnectPeer { msg: _ } => {
- //TODO: Try to push msg
- log_trace!(self, "Got Err handling message, disconnecting peer because {}", e.err);
- return Err(PeerHandleError{ no_connection_possible: false });
- },
- msgs::ErrorAction::IgnoreError => {
- log_trace!(self, "Got Err handling message, ignoring because {}", e.err);
- continue;
- },
- msgs::ErrorAction::SendErrorMessage { msg } => {
- log_trace!(self, "Got Err handling message, sending Error message because {}", e.err);
- encode_and_send_msg!(msg, 17);
- continue;
- },
- }
- } else {
- log_debug!(self, "Got Err handling message, action not yet filled in: {}", e.err);
- return Err(PeerHandleError{ no_connection_possible: false });
+//! Top level peer message handling and socket handling logic lives here.
+//!
+//! Instead of actually servicing sockets ourselves we require that you implement the
+//! SocketDescriptor interface and use that to receive actions which you should perform on the
+//! socket, and call into PeerManager with bytes read from the socket. The PeerManager will then
+//! call into the provided message handlers (probably a ChannelManager and Router) with messages
+//! they should handle, and encoding/sending response messages.
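+//!
+//! A minimal driver-loop sketch (the descriptor type, read_from_socket, and error handling
+//! are hypothetical; see read_event/write_event/process_events below for the exact
+//! invariants):
+//! ```ignore
+//! let peer_manager = PeerManager::new(message_handler, our_node_secret, &ephemeral_bytes, logger);
+//! peer_manager.new_inbound_connection(descriptor.clone())?;
+//! loop {
+//!     let data = read_from_socket();
+//!     peer_manager.read_event(&mut descriptor, data)?; // Ok(true) means pause reads
+//!     peer_manager.process_events(); // triggers send_data for any queued responses
+//! }
+//! ```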
+
+use secp256k1::key::{SecretKey,PublicKey};
+
+use ln::msgs;
+use util::ser::{Writeable, Writer, Readable};
+use ln::peer_channel_encryptor::{PeerChannelEncryptor,NextNoiseStep};
+use util::byte_utils;
+use util::events::{MessageSendEvent};
+use util::logger::Logger;
+
+use std::collections::{HashMap,hash_map,HashSet,LinkedList};
+use std::sync::{Arc, Mutex};
+use std::sync::atomic::{AtomicUsize, Ordering};
+use std::{cmp,error,hash,fmt};
+
+use bitcoin_hashes::sha256::Hash as Sha256;
+use bitcoin_hashes::sha256::HashEngine as Sha256Engine;
+use bitcoin_hashes::{HashEngine, Hash};
+
+/// Provides references to trait impls which handle different types of messages.
+pub struct MessageHandler {
+ /// A message handler which handles messages specific to channels. Usually this is just a
+ /// ChannelManager object.
+ pub chan_handler: Arc<msgs::ChannelMessageHandler>,
+ /// A message handler which handles messages updating our knowledge of the network channel
+ /// graph. Usually this is just a Router object.
+ pub route_handler: Arc<msgs::RoutingMessageHandler>,
+}
+
+/// Provides an object which can be used to send data to and which uniquely identifies a connection
+/// to a remote host. You will need to be able to generate multiple of these which meet Eq and
+/// implement Hash to meet the PeerManager API.
+///
+/// For efficiency, Clone should be relatively cheap for this type.
+///
+/// You probably want to just extend an int and put a file descriptor in a struct and implement
+/// send_data. Note that if you are using a higher-level net library that may close() itself, be
+/// careful to ensure you don't have races whereby you might register a new connection with an fd
+/// which is the same as one that has not yet had its disconnect_event.
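+///
+/// A minimal sketch of an implementation (write_nonblocking and close_socket are hypothetical
+/// stand-ins for your networking layer):
+/// ```ignore
+/// #[derive(Clone, PartialEq, Eq, Hash)]
+/// struct FdDescriptor(u64); // unique per connection until its disconnect_event
+/// impl SocketDescriptor for FdDescriptor {
+///     fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize {
+///         write_nonblocking(self.0, data) // bytes actually queued, possibly 0
+///     }
+///     fn disconnect_socket(&mut self) {
+///         close_socket(self.0);
+///     }
+/// }
+/// ```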
+pub trait SocketDescriptor : cmp::Eq + hash::Hash + Clone {
+ /// Attempts to send some data from the given slice to the peer.
+ ///
+ /// Returns the amount of data which was sent, possibly 0 if the socket has since disconnected.
+ /// Note that in the disconnected case, a disconnect_event must still fire and further write
+ /// attempts may occur until that time.
+ ///
+ /// If the returned size is smaller than data.len(), a write_available event must
+ /// trigger the next time more data can be written. Additionally, until a send_data call
+ /// completes fully, no further read_events should trigger on the same peer!
+ ///
+ /// If a read_event on this descriptor had previously returned true (indicating that read
+ /// events should be paused to prevent DoS in the send buffer), resume_read may be set
+ /// indicating that read events on this descriptor should resume. A resume_read of false does
+ /// *not* imply that further read events should be paused.
+ fn send_data(&mut self, data: &[u8], resume_read: bool) -> usize;
+ /// Disconnect the socket pointed to by this SocketDescriptor. Once this function returns, no
+ /// more calls to write_event, read_event or disconnect_event may be made with this descriptor.
+ /// No disconnect_event should be generated as a result of this call, though obviously races
+ /// may occur whereby disconnect_socket is called after a call to disconnect_event but prior to
+ /// that event completing.
+ fn disconnect_socket(&mut self);
+}
+
+/// Error for PeerManager errors. If you get one of these, you must disconnect the socket and
+/// generate no further read/write_events for the descriptor, only triggering a single
+/// disconnect_event (unless it was provided in response to a new_*_connection event, in which case
+/// no such disconnect_event must be generated and the socket be silently disconnected).
+pub struct PeerHandleError {
+ /// Used to indicate that we probably can't make any future connections to this peer, implying
+ /// we should go ahead and force-close any channels we have with it.
+ no_connection_possible: bool,
+}
+impl fmt::Debug for PeerHandleError {
+ fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ formatter.write_str("Peer Sent Invalid Data")
+ }
+}
+impl fmt::Display for PeerHandleError {
+ fn fmt(&self, formatter: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+ formatter.write_str("Peer Sent Invalid Data")
+ }
+}
+impl error::Error for PeerHandleError {
+ fn description(&self) -> &str {
+ "Peer Sent Invalid Data"
+ }
+}
+
+enum InitSyncTracker {
+ NoSyncRequested,
+ ChannelsSyncing(u64),
+ NodesSyncing(PublicKey),
+}
+
+struct Peer {
+ channel_encryptor: PeerChannelEncryptor,
+ outbound: bool,
+ their_node_id: Option<PublicKey>,
+ their_global_features: Option<msgs::GlobalFeatures>,
+ their_local_features: Option<msgs::LocalFeatures>,
+
+ pending_outbound_buffer: LinkedList<Vec<u8>>,
+ pending_outbound_buffer_first_msg_offset: usize,
+ awaiting_write_event: bool,
+
+ pending_read_buffer: Vec<u8>,
+ pending_read_buffer_pos: usize,
+ pending_read_is_header: bool,
+
+ sync_status: InitSyncTracker,
+}
+
+impl Peer {
+ /// Returns true if the channel announcements/updates for the given channel should be
+ /// forwarded to this peer.
+ /// If we are sending our routing table to this peer and we have not yet sent channel
+ /// announcements/updates for the given channel_id then we will send it when we get to that
+ /// point and we shouldn't send it yet to avoid sending duplicate updates. If we've already
+ /// sent the old versions, we should send the update, and so return true here.
+ fn should_forward_channel(&self, channel_id: u64) -> bool {
+ match self.sync_status {
+ InitSyncTracker::NoSyncRequested => true,
+ InitSyncTracker::ChannelsSyncing(i) => i < channel_id,
+ InitSyncTracker::NodesSyncing(_) => true,
+ }
+ }
+}
+
+struct PeerHolder<Descriptor: SocketDescriptor> {
+ peers: HashMap<Descriptor, Peer>,
+ /// Added to by do_read_event for cases where we pushed a message onto the send buffer but
+ /// didn't call do_attempt_write_data to avoid reentrancy. Cleared in process_events()
+ peers_needing_send: HashSet<Descriptor>,
+ /// Only add to this set when noise completes:
+ node_id_to_descriptor: HashMap<PublicKey, Descriptor>,
+}
+struct MutPeerHolder<'a, Descriptor: SocketDescriptor + 'a> {
+ peers: &'a mut HashMap<Descriptor, Peer>,
+ peers_needing_send: &'a mut HashSet<Descriptor>,
+ node_id_to_descriptor: &'a mut HashMap<PublicKey, Descriptor>,
+}
+impl<Descriptor: SocketDescriptor> PeerHolder<Descriptor> {
+ fn borrow_parts(&mut self) -> MutPeerHolder<Descriptor> {
+ MutPeerHolder {
+ peers: &mut self.peers,
+ peers_needing_send: &mut self.peers_needing_send,
+ node_id_to_descriptor: &mut self.node_id_to_descriptor,
+ }
+ }
+}
+
+#[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))]
+fn _check_usize_is_32_or_64() {
+ // See below, less than 32 bit pointers may be unsafe here!
+ unsafe { mem::transmute::<*const usize, [u8; 4]>(panic!()); }
+}
+
+/// A PeerManager manages a set of peers, described by their SocketDescriptor and marshalls socket
+/// events into messages which it passes on to its MessageHandlers.
+pub struct PeerManager<Descriptor: SocketDescriptor> {
+ message_handler: MessageHandler,
+ peers: Mutex<PeerHolder<Descriptor>>,
+ our_node_secret: SecretKey,
+ ephemeral_key_midstate: Sha256Engine,
+
+ // Usize needs to be at least 32 bits to avoid overflowing both low and high. If usize is 64
+ // bits we will never realistically count into high:
+ peer_counter_low: AtomicUsize,
+ peer_counter_high: AtomicUsize,
+
+ initial_syncs_sent: AtomicUsize,
+ logger: Arc<Logger>,
+}
+
+struct VecWriter(Vec<u8>);
+impl Writer for VecWriter {
+ fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+ self.0.extend_from_slice(buf);
+ Ok(())
+ }
+ fn size_hint(&mut self, size: usize) {
+ self.0.reserve_exact(size);
+ }
+}
+
+macro_rules! encode_msg {
+ ($msg: expr, $msg_code: expr) => {{
+ let mut msg = VecWriter(Vec::new());
+ ($msg_code as u16).write(&mut msg).unwrap();
+ $msg.write(&mut msg).unwrap();
+ msg.0
+ }}
+}
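+
+// The plaintext handed to the encryptor by encode_msg! follows BOLT 1 framing: the 16-bit
+// big-endian message type followed by the message's serialized payload.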
+
+//TODO: Really should do something smarter for this
+const INITIAL_SYNCS_TO_SEND: usize = 5;
+
+/// Manages and reacts to connection events. You probably want to use file descriptors as PeerIds.
+/// PeerIds may repeat, but only after disconnect_event() has been called.
+impl<Descriptor: SocketDescriptor> PeerManager<Descriptor> {
+ /// Constructs a new PeerManager with the given message handlers and node_id secret key.
+ /// ephemeral_random_data is used to derive per-connection ephemeral keys and must be
+ /// cryptographically secure random bytes.
+ pub fn new(message_handler: MessageHandler, our_node_secret: SecretKey, ephemeral_random_data: &[u8; 32], logger: Arc<Logger>) -> PeerManager<Descriptor> {
+ let mut ephemeral_key_midstate = Sha256::engine();
+ ephemeral_key_midstate.input(ephemeral_random_data);
+
+ PeerManager {
+ message_handler: message_handler,
+ peers: Mutex::new(PeerHolder {
+ peers: HashMap::new(),
+ peers_needing_send: HashSet::new(),
+ node_id_to_descriptor: HashMap::new()
+ }),
+ our_node_secret: our_node_secret,
+ ephemeral_key_midstate,
+ peer_counter_low: AtomicUsize::new(0),
+ peer_counter_high: AtomicUsize::new(0),
+ initial_syncs_sent: AtomicUsize::new(0),
+ logger,
+ }
+ }
+
+ /// Get the list of node ids for peers which have completed the initial handshake.
+ ///
+ /// For outbound connections, this will be the same as the their_node_id parameter passed in to
+ /// new_outbound_connection, however entries will only appear once the initial handshake has
+ /// completed and we are sure the remote peer has the private key for the given node_id.
+ pub fn get_peer_node_ids(&self) -> Vec<PublicKey> {
+ let peers = self.peers.lock().unwrap();
+ peers.peers.values().filter_map(|p| {
+ if !p.channel_encryptor.is_ready_for_encryption() || p.their_global_features.is_none() {
+ return None;
+ }
+ p.their_node_id
+ }).collect()
+ }
+
+ fn get_ephemeral_key(&self) -> SecretKey {
+ let mut ephemeral_hash = self.ephemeral_key_midstate.clone();
+ let low = self.peer_counter_low.fetch_add(1, Ordering::AcqRel);
+ let high = if low == 0 {
+ self.peer_counter_high.fetch_add(1, Ordering::AcqRel)
+ } else {
+ self.peer_counter_high.load(Ordering::Acquire)
+ };
+ ephemeral_hash.input(&byte_utils::le64_to_array(low as u64));
+ ephemeral_hash.input(&byte_utils::le64_to_array(high as u64));
+ SecretKey::from_slice(&Sha256::from_engine(ephemeral_hash).into_inner()).expect("You broke SHA-256!")
+ }
+
+ /// Indicates a new outbound connection has been established to a node with the given node_id.
+ /// Note that if an Err is returned here you MUST NOT call disconnect_event for the new
+ /// descriptor but must disconnect the connection immediately.
+ ///
+ /// Returns a small number of bytes to send to the remote node (currently always 50).
+ ///
+ /// Panics if descriptor is duplicative with some other descriptor which has not yet had a
+ /// disconnect_event.
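+ ///
+ /// An illustrative sketch (descriptor is a mutable SocketDescriptor binding; transport
+ /// details are hypothetical):
+ /// ```ignore
+ /// let act_one = peer_manager.new_outbound_connection(their_node_id, descriptor.clone())?;
+ /// descriptor.send_data(&act_one, true); // the outbound connector always speaks first
+ /// ```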
+ pub fn new_outbound_connection(&self, their_node_id: PublicKey, descriptor: Descriptor) -> Result<Vec<u8>, PeerHandleError> {
+ let mut peer_encryptor = PeerChannelEncryptor::new_outbound(their_node_id.clone(), self.get_ephemeral_key());
+ let res = peer_encryptor.get_act_one().to_vec();
+ let pending_read_buffer = [0; 50].to_vec(); // Noise act two is 50 bytes
+
+ let mut peers = self.peers.lock().unwrap();
+ if peers.peers.insert(descriptor, Peer {
+ channel_encryptor: peer_encryptor,
+ outbound: true,
+ their_node_id: None,
+ their_global_features: None,
+ their_local_features: None,
+
+ pending_outbound_buffer: LinkedList::new(),
+ pending_outbound_buffer_first_msg_offset: 0,
+ awaiting_write_event: false,
+
+ pending_read_buffer: pending_read_buffer,
+ pending_read_buffer_pos: 0,
+ pending_read_is_header: false,
+
+ sync_status: InitSyncTracker::NoSyncRequested,
+ }).is_some() {
+ panic!("PeerManager driver duplicated descriptors!");
+ };
+ Ok(res)
+ }
+
+ /// Indicates a new inbound connection has been established.
+ ///
+ /// May refuse the connection by returning an Err, but will never write bytes to the remote end
+ /// (outbound connector always speaks first). Note that if an Err is returned here you MUST NOT
+ /// call disconnect_event for the new descriptor but must disconnect the connection
+ /// immediately.
+ ///
+ /// Panics if descriptor is duplicative with some other descriptor which has not yet had a
+ /// disconnect_event.
+ pub fn new_inbound_connection(&self, descriptor: Descriptor) -> Result<(), PeerHandleError> {
+ let peer_encryptor = PeerChannelEncryptor::new_inbound(&self.our_node_secret);
+ let pending_read_buffer = [0; 50].to_vec(); // Noise act one is 50 bytes
+
+ let mut peers = self.peers.lock().unwrap();
+ if peers.peers.insert(descriptor, Peer {
+ channel_encryptor: peer_encryptor,
+ outbound: false,
+ their_node_id: None,
+ their_global_features: None,
+ their_local_features: None,
+
+ pending_outbound_buffer: LinkedList::new(),
+ pending_outbound_buffer_first_msg_offset: 0,
+ awaiting_write_event: false,
+
+ pending_read_buffer: pending_read_buffer,
+ pending_read_buffer_pos: 0,
+ pending_read_is_header: false,
+
+ sync_status: InitSyncTracker::NoSyncRequested,
+ }).is_some() {
+ panic!("PeerManager driver duplicated descriptors!");
+ };
+ Ok(())
+ }
+
+ fn do_attempt_write_data(&self, descriptor: &mut Descriptor, peer: &mut Peer) {
+ macro_rules! encode_and_send_msg {
+ ($msg: expr, $msg_code: expr) => {
+ {
+ log_trace!(self, "Encoding and sending sync update message of type {} to {}", $msg_code, log_pubkey!(peer.their_node_id.unwrap()));
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!($msg, $msg_code)[..]));
+ }
+ }
+ }
+ const MSG_BUFF_SIZE: usize = 10;
+ while !peer.awaiting_write_event {
+ if peer.pending_outbound_buffer.len() < MSG_BUFF_SIZE {
+ match peer.sync_status {
+ InitSyncTracker::NoSyncRequested => {},
+ InitSyncTracker::ChannelsSyncing(c) if c < 0xffff_ffff_ffff_ffff => {
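+ // Each channel_announcement is sent together with its two channel_updates (three
+ // messages total), so request ceil(free_buffer_slots / 3) announcements.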
+ let steps = ((MSG_BUFF_SIZE - peer.pending_outbound_buffer.len() + 2) / 3) as u8;
+ let all_messages = self.message_handler.route_handler.get_next_channel_announcements(0, steps);
+ for &(ref announce, ref update_a, ref update_b) in all_messages.iter() {
+ encode_and_send_msg!(announce, 256);
+ encode_and_send_msg!(update_a, 258);
+ encode_and_send_msg!(update_b, 258);
+ peer.sync_status = InitSyncTracker::ChannelsSyncing(announce.contents.short_channel_id + 1);
+ }
+ if all_messages.is_empty() || all_messages.len() != steps as usize {
+ peer.sync_status = InitSyncTracker::ChannelsSyncing(0xffff_ffff_ffff_ffff);
+ }
+ },
+ InitSyncTracker::ChannelsSyncing(c) if c == 0xffff_ffff_ffff_ffff => {
+ let steps = (MSG_BUFF_SIZE - peer.pending_outbound_buffer.len()) as u8;
+ let all_messages = self.message_handler.route_handler.get_next_node_announcements(None, steps);
+ for msg in all_messages.iter() {
+ encode_and_send_msg!(msg, 256);
+ peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id);
+ }
+ if all_messages.is_empty() || all_messages.len() != steps as usize {
+ peer.sync_status = InitSyncTracker::NoSyncRequested;
+ }
+ },
+ InitSyncTracker::ChannelsSyncing(_) => unreachable!(),
+ InitSyncTracker::NodesSyncing(key) => {
+ let steps = (MSG_BUFF_SIZE - peer.pending_outbound_buffer.len()) as u8;
+ let all_messages = self.message_handler.route_handler.get_next_node_announcements(Some(&key), steps);
+ for msg in all_messages.iter() {
+ encode_and_send_msg!(msg, 256);
+ peer.sync_status = InitSyncTracker::NodesSyncing(msg.contents.node_id);
+ }
+ if all_messages.is_empty() || all_messages.len() != steps as usize {
+ peer.sync_status = InitSyncTracker::NoSyncRequested;
+ }
+ },
+ }
+ }
+
+ if {
+ let next_buff = match peer.pending_outbound_buffer.front() {
+ None => return,
+ Some(buff) => buff,
+ };
+
+ let should_be_reading = peer.pending_outbound_buffer.len() < MSG_BUFF_SIZE;
+ let pending = &next_buff[peer.pending_outbound_buffer_first_msg_offset..];
+ let data_sent = descriptor.send_data(pending, should_be_reading);
+ peer.pending_outbound_buffer_first_msg_offset += data_sent;
+ peer.pending_outbound_buffer_first_msg_offset == next_buff.len()
+ } {
+ peer.pending_outbound_buffer_first_msg_offset = 0;
+ peer.pending_outbound_buffer.pop_front();
+ } else {
+ peer.awaiting_write_event = true;
+ }
+ }
+ }
+
+ /// Indicates that there is room to write data to the given socket descriptor.
+ ///
+ /// May return an Err to indicate that the connection should be closed.
+ ///
+ /// Will most likely call send_data on the descriptor passed in (or the descriptor handed into
+ /// new_*\_connection) before returning. Thus, be very careful with reentrancy issues! The
+ /// invariants around calling write_event in case a write did not fully complete must still
+ /// hold - be ready to call write_event again if a write call generated here isn't sufficient!
+ /// Panics if the descriptor was not previously registered in a new_\*_connection event.
+ pub fn write_event(&self, descriptor: &mut Descriptor) -> Result<(), PeerHandleError> {
+ let mut peers = self.peers.lock().unwrap();
+ match peers.peers.get_mut(descriptor) {
+ None => panic!("Descriptor for write_event is not already known to PeerManager"),
+ Some(peer) => {
+ peer.awaiting_write_event = false;
+ self.do_attempt_write_data(descriptor, peer);
+ }
+ };
+ Ok(())
+ }
+
+ /// Indicates that data was read from the given socket descriptor.
+ ///
+ /// May return an Err to indicate that the connection should be closed.
+ ///
+ /// Will *not* call back into send_data on any descriptors to avoid reentrancy complexity.
+ /// Instead, you almost certainly want to call process_events() after any read_event to
+ /// generate send_data calls to handle responses.
+ ///
+ /// If Ok(true) is returned, further read_events should not be triggered until a write_event on
+ /// this file descriptor has resume_read set (preventing DoS issues in the send buffer).
+ ///
+ /// Panics if the descriptor was not previously registered in a new_*_connection event.
+ pub fn read_event(&self, peer_descriptor: &mut Descriptor, data: Vec<u8>) -> Result<bool, PeerHandleError> {
+ match self.do_read_event(peer_descriptor, data) {
+ Ok(res) => Ok(res),
+ Err(e) => {
+ self.disconnect_event_internal(peer_descriptor, e.no_connection_possible);
+ Err(e)
+ }
+ }
+ }
+
+ fn do_read_event(&self, peer_descriptor: &mut Descriptor, data: Vec<u8>) -> Result<bool, PeerHandleError> {
+ let pause_read = {
+ let mut peers_lock = self.peers.lock().unwrap();
+ let peers = peers_lock.borrow_parts();
+ let pause_read = match peers.peers.get_mut(peer_descriptor) {
+ None => panic!("Descriptor for read_event is not already known to PeerManager"),
+ Some(peer) => {
+ assert!(peer.pending_read_buffer.len() > 0);
+ assert!(peer.pending_read_buffer.len() > peer.pending_read_buffer_pos);
+
+ let mut read_pos = 0;
+ while read_pos < data.len() {
+ {
+ let data_to_copy = cmp::min(peer.pending_read_buffer.len() - peer.pending_read_buffer_pos, data.len() - read_pos);
+ peer.pending_read_buffer[peer.pending_read_buffer_pos..peer.pending_read_buffer_pos + data_to_copy].copy_from_slice(&data[read_pos..read_pos + data_to_copy]);
+ read_pos += data_to_copy;
+ peer.pending_read_buffer_pos += data_to_copy;
+ }
+
+ if peer.pending_read_buffer_pos == peer.pending_read_buffer.len() {
+ peer.pending_read_buffer_pos = 0;
+
+ macro_rules! encode_and_send_msg {
+ ($msg: expr, $msg_code: expr) => {
+ {
+ log_trace!(self, "Encoding and sending message of type {} to {}", $msg_code, log_pubkey!(peer.their_node_id.unwrap()));
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!($msg, $msg_code)[..]));
+ peers.peers_needing_send.insert(peer_descriptor.clone());
+ }
+ }
+ }
+
+ macro_rules! try_potential_handleerror {
+ ($thing: expr) => {
+ match $thing {
+ Ok(x) => x,
+ Err(e) => {
- if let Some(ref action) = *action {
- match *action {
- msgs::ErrorAction::DisconnectPeer { ref msg } => {
- if let Some(mut descriptor) = peers.node_id_to_descriptor.remove(node_id) {
- peers.peers_needing_send.remove(&descriptor);
- if let Some(mut peer) = peers.peers.remove(&descriptor) {
- if let Some(ref msg) = *msg {
- log_trace!(self, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
- log_pubkey!(node_id),
- msg.data);
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 17)));
- // This isn't guaranteed to work, but if there is enough free
- // room in the send buffer, put the error message there...
- self.do_attempt_write_data(&mut descriptor, &mut peer);
- } else {
- log_trace!(self, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id));
- }
++ match e.action {
++ msgs::ErrorAction::DisconnectPeer { msg: _ } => {
++ //TODO: Try to push msg
++ log_trace!(self, "Got Err handling message, disconnecting peer because {}", e.err);
++ return Err(PeerHandleError{ no_connection_possible: false });
++ },
++ msgs::ErrorAction::IgnoreError => {
++ log_trace!(self, "Got Err handling message, ignoring because {}", e.err);
++ continue;
++ },
++ msgs::ErrorAction::SendErrorMessage { msg } => {
++ log_trace!(self, "Got Err handling message, sending Error message because {}", e.err);
++ encode_and_send_msg!(msg, 17);
++ continue;
++ },
+ }
+ }
+ };
+ }
+ }
+
+ macro_rules! try_potential_decodeerror {
+ ($thing: expr) => {
+ match $thing {
+ Ok(x) => x,
+ Err(e) => {
+ match e {
+ msgs::DecodeError::UnknownVersion => return Err(PeerHandleError{ no_connection_possible: false }),
+ msgs::DecodeError::UnknownRequiredFeature => {
+ log_debug!(self, "Got a channel/node announcement with an known required feature flag, you may want to update!");
+ continue;
+ },
+ msgs::DecodeError::InvalidValue => {
+ log_debug!(self, "Got an invalid value while deserializing message");
+ return Err(PeerHandleError{ no_connection_possible: false });
+ },
+ msgs::DecodeError::ShortRead => {
+ log_debug!(self, "Deserialization failed due to shortness of message");
+ return Err(PeerHandleError{ no_connection_possible: false });
+ },
+ msgs::DecodeError::ExtraAddressesPerType => {
+ log_debug!(self, "Error decoding message, ignoring due to lnd spec incompatibility. See https://github.com/lightningnetwork/lnd/issues/1407");
+ continue;
+ },
+ msgs::DecodeError::BadLengthDescriptor => return Err(PeerHandleError{ no_connection_possible: false }),
+ msgs::DecodeError::Io(_) => return Err(PeerHandleError{ no_connection_possible: false }),
+ }
+ }
+ };
+ }
+ }
+
+ macro_rules! insert_node_id {
+ () => {
+ match peers.node_id_to_descriptor.entry(peer.their_node_id.unwrap()) {
+ hash_map::Entry::Occupied(_) => {
+ log_trace!(self, "Got second connection with {}, closing", log_pubkey!(peer.their_node_id.unwrap()));
+ peer.their_node_id = None; // Unset so that we don't generate a peer_disconnected event
+ return Err(PeerHandleError{ no_connection_possible: false })
+ },
+ hash_map::Entry::Vacant(entry) => {
+ log_trace!(self, "Finished noise handshake for connection with {}", log_pubkey!(peer.their_node_id.unwrap()));
+ entry.insert(peer_descriptor.clone())
+ },
+ };
+ }
+ }
+
+ let next_step = peer.channel_encryptor.get_noise_step();
+ match next_step {
+ NextNoiseStep::ActOne => {
+ let act_two = try_potential_handleerror!(peer.channel_encryptor.process_act_one_with_keys(&peer.pending_read_buffer[..], &self.our_node_secret, self.get_ephemeral_key())).to_vec();
+ peer.pending_outbound_buffer.push_back(act_two);
+ peer.pending_read_buffer = [0; 66].to_vec(); // act three is 66 bytes long
+ },
+ NextNoiseStep::ActTwo => {
+ let (act_three, their_node_id) = try_potential_handleerror!(peer.channel_encryptor.process_act_two(&peer.pending_read_buffer[..], &self.our_node_secret));
+ peer.pending_outbound_buffer.push_back(act_three.to_vec());
+ peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
+ peer.pending_read_is_header = true;
+
+ peer.their_node_id = Some(their_node_id);
+ insert_node_id!();
+ let mut local_features = msgs::LocalFeatures::new();
+ if self.initial_syncs_sent.load(Ordering::Acquire) < INITIAL_SYNCS_TO_SEND {
+ self.initial_syncs_sent.fetch_add(1, Ordering::AcqRel);
+ local_features.set_initial_routing_sync();
+ }
+ encode_and_send_msg!(msgs::Init {
+ global_features: msgs::GlobalFeatures::new(),
+ local_features,
+ }, 16);
+ },
+ NextNoiseStep::ActThree => {
+ let their_node_id = try_potential_handleerror!(peer.channel_encryptor.process_act_three(&peer.pending_read_buffer[..]));
+ peer.pending_read_buffer = [0; 18].to_vec(); // Message length header is 18 bytes
+ peer.pending_read_is_header = true;
+ peer.their_node_id = Some(their_node_id);
+ insert_node_id!();
+ },
+ NextNoiseStep::NoiseComplete => {
+ if peer.pending_read_is_header {
+ let msg_len = try_potential_handleerror!(peer.channel_encryptor.decrypt_length_header(&peer.pending_read_buffer[..]));
+ peer.pending_read_buffer = Vec::with_capacity(msg_len as usize + 16);
+ peer.pending_read_buffer.resize(msg_len as usize + 16, 0);
+ if msg_len < 2 { // Need at least the message type tag
+ return Err(PeerHandleError{ no_connection_possible: false });
+ }
+ peer.pending_read_is_header = false;
+ } else {
+ let msg_data = try_potential_handleerror!(peer.channel_encryptor.decrypt_message(&peer.pending_read_buffer[..]));
+ assert!(msg_data.len() >= 2);
+
+ // Reset read buffer
+ peer.pending_read_buffer = [0; 18].to_vec();
+ peer.pending_read_is_header = true;
+
+ let msg_type = byte_utils::slice_to_be16(&msg_data[0..2]);
+ log_trace!(self, "Received message of type {} from {}", msg_type, log_pubkey!(peer.their_node_id.unwrap()));
+ if msg_type != 16 && peer.their_global_features.is_none() {
+ // Need an init message as first message
+ log_trace!(self, "Peer {} sent non-Init first message", log_pubkey!(peer.their_node_id.unwrap()));
+ return Err(PeerHandleError{ no_connection_possible: false });
+ }
+ let mut reader = ::std::io::Cursor::new(&msg_data[2..]);
+ match msg_type {
+ // Connection control:
+ 16 => {
+ let msg = try_potential_decodeerror!(msgs::Init::read(&mut reader));
+ if msg.global_features.requires_unknown_bits() {
+ log_info!(self, "Peer global features required unknown version bits");
+ return Err(PeerHandleError{ no_connection_possible: true });
+ }
+ if msg.local_features.requires_unknown_bits() {
+ log_info!(self, "Peer local features required unknown version bits");
+ return Err(PeerHandleError{ no_connection_possible: true });
+ }
+ if peer.their_global_features.is_some() {
+ return Err(PeerHandleError{ no_connection_possible: false });
+ }
+
+ log_info!(self, "Received peer Init message: data_loss_protect: {}, initial_routing_sync: {}, upfront_shutdown_script: {}, unkown local flags: {}, unknown global flags: {}",
+ if msg.local_features.supports_data_loss_protect() { "supported" } else { "not supported"},
+ if msg.local_features.initial_routing_sync() { "requested" } else { "not requested" },
+ if msg.local_features.supports_upfront_shutdown_script() { "supported" } else { "not supported"},
+ if msg.local_features.supports_unknown_bits() { "present" } else { "none" },
+ if msg.global_features.supports_unknown_bits() { "present" } else { "none" });
+
+ if msg.local_features.initial_routing_sync() {
+ peer.sync_status = InitSyncTracker::ChannelsSyncing(0);
+ peers.peers_needing_send.insert(peer_descriptor.clone());
+ }
+ peer.their_global_features = Some(msg.global_features);
+ peer.their_local_features = Some(msg.local_features);
+
+ if !peer.outbound {
+ let mut local_features = msgs::LocalFeatures::new();
+ if self.initial_syncs_sent.load(Ordering::Acquire) < INITIAL_SYNCS_TO_SEND {
+ self.initial_syncs_sent.fetch_add(1, Ordering::AcqRel);
+ local_features.set_initial_routing_sync();
+ }
+
+ encode_and_send_msg!(msgs::Init {
+ global_features: msgs::GlobalFeatures::new(),
+ local_features,
+ }, 16);
+ }
+
+ self.message_handler.chan_handler.peer_connected(&peer.their_node_id.unwrap());
+ },
+ 17 => {
+ let msg = try_potential_decodeerror!(msgs::ErrorMessage::read(&mut reader));
+ let mut data_is_printable = true;
+ for b in msg.data.bytes() {
+ if b < 32 || b > 126 {
+ data_is_printable = false;
+ break;
+ }
+ }
+
+ if data_is_printable {
+ log_debug!(self, "Got Err message from {}: {}", log_pubkey!(peer.their_node_id.unwrap()), msg.data);
+ } else {
+ log_debug!(self, "Got Err message from {} with non-ASCII error message", log_pubkey!(peer.their_node_id.unwrap()));
+ }
+ self.message_handler.chan_handler.handle_error(&peer.their_node_id.unwrap(), &msg);
+ if msg.channel_id == [0; 32] {
+ return Err(PeerHandleError{ no_connection_possible: true });
+ }
+ },
+
+ 18 => {
+ let msg = try_potential_decodeerror!(msgs::Ping::read(&mut reader));
+ if msg.ponglen < 65532 {
+ let resp = msgs::Pong { byteslen: msg.ponglen };
+ encode_and_send_msg!(resp, 19);
+ }
+ },
+ 19 => {
+ try_potential_decodeerror!(msgs::Pong::read(&mut reader));
+ },
+
+ // Channel control:
+ 32 => {
+ let msg = try_potential_decodeerror!(msgs::OpenChannel::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_open_channel(&peer.their_node_id.unwrap(), peer.their_local_features.clone().unwrap(), &msg));
+ },
+ 33 => {
+ let msg = try_potential_decodeerror!(msgs::AcceptChannel::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_accept_channel(&peer.their_node_id.unwrap(), peer.their_local_features.clone().unwrap(), &msg));
+ },
+
+ 34 => {
+ let msg = try_potential_decodeerror!(msgs::FundingCreated::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_funding_created(&peer.their_node_id.unwrap(), &msg));
+ },
+ 35 => {
+ let msg = try_potential_decodeerror!(msgs::FundingSigned::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_funding_signed(&peer.their_node_id.unwrap(), &msg));
+ },
+ 36 => {
+ let msg = try_potential_decodeerror!(msgs::FundingLocked::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_funding_locked(&peer.their_node_id.unwrap(), &msg));
+ },
+
+ 38 => {
+ let msg = try_potential_decodeerror!(msgs::Shutdown::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_shutdown(&peer.their_node_id.unwrap(), &msg));
+ },
+ 39 => {
+ let msg = try_potential_decodeerror!(msgs::ClosingSigned::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_closing_signed(&peer.their_node_id.unwrap(), &msg));
+ },
+
+ 128 => {
+ let msg = try_potential_decodeerror!(msgs::UpdateAddHTLC::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_update_add_htlc(&peer.their_node_id.unwrap(), &msg));
+ },
+ 130 => {
+ let msg = try_potential_decodeerror!(msgs::UpdateFulfillHTLC::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_update_fulfill_htlc(&peer.their_node_id.unwrap(), &msg));
+ },
+ 131 => {
+ let msg = try_potential_decodeerror!(msgs::UpdateFailHTLC::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_update_fail_htlc(&peer.their_node_id.unwrap(), &msg));
+ },
+ 135 => {
+ let msg = try_potential_decodeerror!(msgs::UpdateFailMalformedHTLC::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_update_fail_malformed_htlc(&peer.their_node_id.unwrap(), &msg));
+ },
+
+ 132 => {
+ let msg = try_potential_decodeerror!(msgs::CommitmentSigned::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_commitment_signed(&peer.their_node_id.unwrap(), &msg));
+ },
+ 133 => {
+ let msg = try_potential_decodeerror!(msgs::RevokeAndACK::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_revoke_and_ack(&peer.their_node_id.unwrap(), &msg));
+ },
+ 134 => {
+ let msg = try_potential_decodeerror!(msgs::UpdateFee::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_update_fee(&peer.their_node_id.unwrap(), &msg));
+ },
+ 136 => {
+ let msg = try_potential_decodeerror!(msgs::ChannelReestablish::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_channel_reestablish(&peer.their_node_id.unwrap(), &msg));
+ },
+
+ // Routing control:
+ 259 => {
+ let msg = try_potential_decodeerror!(msgs::AnnouncementSignatures::read(&mut reader));
+ try_potential_handleerror!(self.message_handler.chan_handler.handle_announcement_signatures(&peer.their_node_id.unwrap(), &msg));
+ },
+ 256 => {
+ let msg = try_potential_decodeerror!(msgs::ChannelAnnouncement::read(&mut reader));
+ let should_forward = try_potential_handleerror!(self.message_handler.route_handler.handle_channel_announcement(&msg));
+
+ if should_forward {
+ // TODO: forward msg along to all our other peers!
+ }
+ },
+ 257 => {
+ let msg = try_potential_decodeerror!(msgs::NodeAnnouncement::read(&mut reader));
+ let should_forward = try_potential_handleerror!(self.message_handler.route_handler.handle_node_announcement(&msg));
+
+ if should_forward {
+ // TODO: forward msg along to all our other peers!
+ }
+ },
+ 258 => {
+ let msg = try_potential_decodeerror!(msgs::ChannelUpdate::read(&mut reader));
+ let should_forward = try_potential_handleerror!(self.message_handler.route_handler.handle_channel_update(&msg));
+
+ if should_forward {
+ // TODO: forward msg along to all our other peers!
+ }
+ },
+ _ => {
+ if (msg_type & 1) == 0 {
+ return Err(PeerHandleError{ no_connection_possible: true });
+ }
+ },
+ }
+ }
+ }
+ }
+ }
+ }
+
+ self.do_attempt_write_data(peer_descriptor, peer);
+
+ peer.pending_outbound_buffer.len() > 10 // pause_read
+ }
+ };
+
+ pause_read
+ };
+
+ Ok(pause_read)
+ }
+
+ /// Checks for any events generated by our handlers and processes them. Includes sending most
+ /// response messages as well as messages generated by calls to handler functions directly (eg
+ /// functions like ChannelManager::process_pending_htlc_forward or send_payment).
+ pub fn process_events(&self) {
+ {
+ // TODO: There are some DoS attacks here where you can flood someone's outbound send
+ // buffer by doing things like announcing channels on another node. We should be willing to
+ // drop optional-ish messages when send buffers get full!
+
+ let mut events_generated = self.message_handler.chan_handler.get_and_clear_pending_msg_events();
+ let mut peers_lock = self.peers.lock().unwrap();
+ let peers = peers_lock.borrow_parts();
+ for event in events_generated.drain(..) {
+ macro_rules! get_peer_for_forwarding {
+ ($node_id: expr, $handle_no_such_peer: block) => {
+ {
+ let descriptor = match peers.node_id_to_descriptor.get($node_id) {
+ Some(descriptor) => descriptor.clone(),
+ None => {
+ $handle_no_such_peer;
+ continue;
+ },
+ };
+ match peers.peers.get_mut(&descriptor) {
+ Some(peer) => {
+ if peer.their_global_features.is_none() {
+ $handle_no_such_peer;
+ continue;
+ }
+ (descriptor, peer)
+ },
+ None => panic!("Inconsistent peers set state!"),
+ }
+ }
+ }
+ }
+ match event {
+ MessageSendEvent::SendAcceptChannel { ref node_id, ref msg } => {
+ log_trace!(self, "Handling SendAcceptChannel event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.temporary_channel_id));
+ let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+ //TODO: Drop the pending channel? (or just let it timeout, but that sucks)
+ });
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 33)));
+ self.do_attempt_write_data(&mut descriptor, peer);
+ },
+ MessageSendEvent::SendOpenChannel { ref node_id, ref msg } => {
+ log_trace!(self, "Handling SendOpenChannel event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.temporary_channel_id));
+ let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+ //TODO: Drop the pending channel? (or just let it timeout, but that sucks)
+ });
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 32)));
+ self.do_attempt_write_data(&mut descriptor, peer);
+ },
+ MessageSendEvent::SendFundingCreated { ref node_id, ref msg } => {
+ log_trace!(self, "Handling SendFundingCreated event in peer_handler for node {} for channel {} (which becomes {})",
+ log_pubkey!(node_id),
+ log_bytes!(msg.temporary_channel_id),
+ log_funding_channel_id!(msg.funding_txid, msg.funding_output_index));
+ let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+ //TODO: generate a DiscardFunding event indicating to the wallet that
+ //they should just throw away this funding transaction
+ });
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 34)));
+ self.do_attempt_write_data(&mut descriptor, peer);
+ },
+ MessageSendEvent::SendFundingSigned { ref node_id, ref msg } => {
+ log_trace!(self, "Handling SendFundingSigned event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+ //TODO: generate a DiscardFunding event indicating to the wallet that
+ //they should just throw away this funding transaction
+ });
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 35)));
+ self.do_attempt_write_data(&mut descriptor, peer);
+ },
+ MessageSendEvent::SendFundingLocked { ref node_id, ref msg } => {
+ log_trace!(self, "Handling SendFundingLocked event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+ //TODO: Do whatever we're gonna do for handling dropped messages
+ });
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 36)));
+ self.do_attempt_write_data(&mut descriptor, peer);
+ },
+ MessageSendEvent::SendAnnouncementSignatures { ref node_id, ref msg } => {
+ log_trace!(self, "Handling SendAnnouncementSignatures event in peer_handler for node {} for channel {})",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+ //TODO: generate a DiscardFunding event indicating to the wallet that
+ //they should just throw away this funding transaction
+ });
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 259)));
+ self.do_attempt_write_data(&mut descriptor, peer);
+ },
+ MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+ log_trace!(self, "Handling UpdateHTLCs event in peer_handler for node {} with {} adds, {} fulfills, {} fails for channel {}",
+ log_pubkey!(node_id),
+ update_add_htlcs.len(),
+ update_fulfill_htlcs.len(),
+ update_fail_htlcs.len(),
+ log_bytes!(commitment_signed.channel_id));
+ let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+ //TODO: Do whatever we're gonna do for handling dropped messages
+ });
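+ // Per BOLT #2 the individual update messages are queued first and the
+ // commitment_signed last, so the counterparty has every update in hand
+ // before it validates the new commitment signature that covers them.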
+ for msg in update_add_htlcs {
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 128)));
+ }
+ for msg in update_fulfill_htlcs {
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 130)));
+ }
+ for msg in update_fail_htlcs {
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 131)));
+ }
+ for msg in update_fail_malformed_htlcs {
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 135)));
+ }
+ if let &Some(ref msg) = update_fee {
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 134)));
+ }
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(commitment_signed, 132)));
+ self.do_attempt_write_data(&mut descriptor, peer);
+ },
+ MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+ log_trace!(self, "Handling SendRevokeAndACK event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+ //TODO: Do whatever we're gonna do for handling dropped messages
+ });
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 133)));
+ self.do_attempt_write_data(&mut descriptor, peer);
+ },
+ MessageSendEvent::SendClosingSigned { ref node_id, ref msg } => {
+ log_trace!(self, "Handling SendClosingSigned event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+ //TODO: Do whatever we're gonna do for handling dropped messages
+ });
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 39)));
+ self.do_attempt_write_data(&mut descriptor, peer);
+ },
+ MessageSendEvent::SendShutdown { ref node_id, ref msg } => {
+ log_trace!(self, "Handling Shutdown event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+ //TODO: Do whatever we're gonna do for handling dropped messages
+ });
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 38)));
+ self.do_attempt_write_data(&mut descriptor, peer);
+ },
+ MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => {
+ log_trace!(self, "Handling SendChannelReestablish event in peer_handler for node {} for channel {}",
+ log_pubkey!(node_id),
+ log_bytes!(msg.channel_id));
+ let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
+ //TODO: Do whatever we're gonna do for handling dropped messages
+ });
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 136)));
+ self.do_attempt_write_data(&mut descriptor, peer);
+ },
+ MessageSendEvent::BroadcastChannelAnnouncement { ref msg, ref update_msg } => {
+ log_trace!(self, "Handling BroadcastChannelAnnouncement event in peer_handler for short channel id {}", msg.contents.short_channel_id);
+ if self.message_handler.route_handler.handle_channel_announcement(msg).is_ok() && self.message_handler.route_handler.handle_channel_update(update_msg).is_ok() {
+ let encoded_msg = encode_msg!(msg, 256);
+ let encoded_update_msg = encode_msg!(update_msg, 258);
+
+ for (ref descriptor, ref mut peer) in peers.peers.iter_mut() {
+ if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_global_features.is_none() ||
+ !peer.should_forward_channel(msg.contents.short_channel_id) {
+ continue
+ }
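+ // Don't echo the announcement back to the channel's own endpoints;
+ // they are the parties who signed it in the first place.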
+ match peer.their_node_id {
+ None => continue,
+ Some(their_node_id) => {
+ if their_node_id == msg.contents.node_id_1 || their_node_id == msg.contents.node_id_2 {
+ continue
+ }
+ }
+ }
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_msg[..]));
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_update_msg[..]));
+ self.do_attempt_write_data(&mut (*descriptor).clone(), peer);
+ }
+ }
+ },
+ MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+ log_trace!(self, "Handling BroadcastChannelUpdate event in peer_handler for short channel id {}", msg.contents.short_channel_id);
+ if self.message_handler.route_handler.handle_channel_update(msg).is_ok() {
+ let encoded_msg = encode_msg!(msg, 258);
+
+ for (ref descriptor, ref mut peer) in peers.peers.iter_mut() {
+ if !peer.channel_encryptor.is_ready_for_encryption() || peer.their_global_features.is_none() ||
+ !peer.should_forward_channel(msg.contents.short_channel_id) {
+ continue
+ }
+ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encoded_msg[..]));
+ self.do_attempt_write_data(&mut (*descriptor).clone(), peer);
+ }
+ }
+ },
+ MessageSendEvent::PaymentFailureNetworkUpdate { ref update } => {
+ self.message_handler.route_handler.handle_htlc_fail_channel_update(update);
+ },
+ MessageSendEvent::HandleError { ref node_id, ref action } => {
- descriptor.disconnect_socket();
- self.message_handler.chan_handler.peer_disconnected(&node_id, false);
++ match *action {
++ msgs::ErrorAction::DisconnectPeer { ref msg } => {
++ if let Some(mut descriptor) = peers.node_id_to_descriptor.remove(node_id) {
++ peers.peers_needing_send.remove(&descriptor);
++ if let Some(mut peer) = peers.peers.remove(&descriptor) {
++ if let Some(ref msg) = *msg {
++ log_trace!(self, "Handling DisconnectPeer HandleError event in peer_handler for node {} with message {}",
++ log_pubkey!(node_id),
++ msg.data);
++ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 17)));
++ // This isn't guaranteed to work, but if there is enough free
++ // room in the send buffer, put the error message there...
++ self.do_attempt_write_data(&mut descriptor, &mut peer);
++ } else {
++ log_trace!(self, "Handling DisconnectPeer HandleError event in peer_handler for node {} with no message", log_pubkey!(node_id));
+ }
- },
- msgs::ErrorAction::IgnoreError => {},
- msgs::ErrorAction::SendErrorMessage { ref msg } => {
- log_trace!(self, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
- log_pubkey!(node_id),
- msg.data);
- let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
- //TODO: Do whatever we're gonna do for handling dropped messages
- });
- peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 17)));
- self.do_attempt_write_data(&mut descriptor, peer);
- },
- }
- } else {
- log_error!(self, "Got no-action HandleError Event in peer_handler for node {}, no such events should ever be generated!", log_pubkey!(node_id));
+ }
- action: Some(msgs::ErrorAction::DisconnectPeer { msg: None }),
++ descriptor.disconnect_socket();
++ self.message_handler.chan_handler.peer_disconnected(&node_id, false);
++ }
++ },
++ msgs::ErrorAction::IgnoreError => {},
++ msgs::ErrorAction::SendErrorMessage { ref msg } => {
++ log_trace!(self, "Handling SendErrorMessage HandleError event in peer_handler for node {} with message {}",
++ log_pubkey!(node_id),
++ msg.data);
++ let (mut descriptor, peer) = get_peer_for_forwarding!(node_id, {
++ //TODO: Do whatever we're gonna do for handling dropped messages
++ });
++ peer.pending_outbound_buffer.push_back(peer.channel_encryptor.encrypt_message(&encode_msg!(msg, 17)));
++ self.do_attempt_write_data(&mut descriptor, peer);
++ },
+ }
+ }
+ }
+ }
+
+ for mut descriptor in peers.peers_needing_send.drain() {
+ match peers.peers.get_mut(&descriptor) {
+ Some(peer) => self.do_attempt_write_data(&mut descriptor, peer),
+ None => panic!("Inconsistent peers set state!"),
+ }
+ }
+ }
+ }
+
+ /// Indicates that the given socket descriptor's connection is now closed.
+ ///
+ /// This must be called even if a PeerHandleError was given for a read_event or write_event,
+ /// but must NOT be called if a PeerHandleError was provided out of a new_\*\_connection event!
+ ///
+ /// Panics if the descriptor was not previously registered in a successful new_*_connection event.
+ pub fn disconnect_event(&self, descriptor: &Descriptor) {
+ self.disconnect_event_internal(descriptor, false);
+ }
+
+ fn disconnect_event_internal(&self, descriptor: &Descriptor, no_connection_possible: bool) {
+ let mut peers = self.peers.lock().unwrap();
+ peers.peers_needing_send.remove(descriptor);
+ let peer_option = peers.peers.remove(descriptor);
+ match peer_option {
+ None => panic!("Descriptor for disconnect_event is not already known to PeerManager"),
+ Some(peer) => {
+ match peer.their_node_id {
+ Some(node_id) => {
+ peers.node_id_to_descriptor.remove(&node_id);
+ self.message_handler.chan_handler.peer_disconnected(&node_id, no_connection_possible);
+ },
+ None => {}
+ }
+ }
+ };
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use ln::peer_handler::{PeerManager, MessageHandler, SocketDescriptor};
+ use ln::msgs;
+ use util::events;
+ use util::test_utils;
+ use util::logger::Logger;
+
+ use secp256k1::Secp256k1;
+ use secp256k1::key::{SecretKey, PublicKey};
+
+ use rand::{thread_rng, Rng};
+
+ use std::sync::{Arc};
+
+ #[derive(PartialEq, Eq, Clone, Hash)]
+ struct FileDescriptor {
+ fd: u16,
+ }
+
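+ // A stub descriptor for tests: send_data claims every byte was written and
+ // disconnect_socket is a no-op, so no real socket I/O or backpressure is modeled.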
+ impl SocketDescriptor for FileDescriptor {
+ fn send_data(&mut self, data: &[u8], _resume_read: bool) -> usize {
+ data.len()
+ }
+
+ fn disconnect_socket(&mut self) {}
+ }
+
+ fn create_network(peer_count: usize) -> Vec<PeerManager<FileDescriptor>> {
+ let mut peers = Vec::new();
+ let mut rng = thread_rng();
+ let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
+ let mut ephemeral_bytes = [0; 32];
+ rng.fill_bytes(&mut ephemeral_bytes);
+
+ for _ in 0..peer_count {
+ let chan_handler = test_utils::TestChannelMessageHandler::new();
+ let router = test_utils::TestRoutingMessageHandler::new();
+ let node_id = {
+ let mut key_slice = [0;32];
+ rng.fill_bytes(&mut key_slice);
+ SecretKey::from_slice(&key_slice).unwrap()
+ };
+ let msg_handler = MessageHandler { chan_handler: Arc::new(chan_handler), route_handler: Arc::new(router) };
+ let peer = PeerManager::new(msg_handler, node_id, &ephemeral_bytes, Arc::clone(&logger));
+ peers.push(peer);
+ }
+
+ peers
+ }
+
+ fn establish_connection(peer_a: &PeerManager<FileDescriptor>, peer_b: &PeerManager<FileDescriptor>) {
+ let secp_ctx = Secp256k1::new();
+ let their_id = PublicKey::from_secret_key(&secp_ctx, &peer_b.our_node_secret);
+ let fd = FileDescriptor { fd: 1};
+ peer_a.new_inbound_connection(fd.clone()).unwrap();
+ peer_a.peers.lock().unwrap().node_id_to_descriptor.insert(their_id, fd.clone());
+ }
+
+ #[test]
+ fn test_disconnect_peer() {
+ // Simple test which builds a network of PeerManagers, connects them, brings them to
+ // NoiseState::Finished, and pushes a DisconnectPeer event to remove the node flagged by id
+ let mut peers = create_network(2);
+ establish_connection(&peers[0], &peers[1]);
+ assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 1);
+
+ let secp_ctx = Secp256k1::new();
+ let their_id = PublicKey::from_secret_key(&secp_ctx, &peers[1].our_node_secret);
+
+ let chan_handler = test_utils::TestChannelMessageHandler::new();
+ chan_handler.pending_events.lock().unwrap().push(events::MessageSendEvent::HandleError {
+ node_id: their_id,
++ action: msgs::ErrorAction::DisconnectPeer { msg: None },
+ });
+ assert_eq!(chan_handler.pending_events.lock().unwrap().len(), 1);
+ peers[0].message_handler.chan_handler = Arc::new(chan_handler);
+
+ peers[0].process_events();
+ assert_eq!(peers[0].peers.lock().unwrap().peers.len(), 0);
+ }
+}
--- /dev/null
- use ln::msgs::{DecodeError,ErrorAction,HandleError,RoutingMessageHandler,NetAddress,GlobalFeatures};
+//! The top-level routing/network map tracking logic lives here.
+//!
+//! You probably want to create a Router and use that as your RoutingMessageHandler and then
+//! interrogate it to get routes for your own payments.
+
+use secp256k1::key::PublicKey;
+use secp256k1::Secp256k1;
+use secp256k1;
+
+use bitcoin_hashes::sha256d::Hash as Sha256dHash;
+use bitcoin_hashes::Hash;
+use bitcoin::blockdata::script::Builder;
+use bitcoin::blockdata::opcodes;
+
+use chain::chaininterface::{ChainError, ChainWatchInterface};
+use ln::channelmanager;
- Err(_) => return Err(HandleError{err: "Invalid signature from remote node", action: None}),
++use ln::msgs::{DecodeError,ErrorAction,LightningError,RoutingMessageHandler,NetAddress,GlobalFeatures};
+use ln::msgs;
+use util::ser::{Writeable, Readable, Writer, ReadableArgs};
+use util::logger::Logger;
+
+use std::cmp;
+use std::sync::{RwLock,Arc};
+use std::collections::{HashMap,BinaryHeap,BTreeMap};
+use std::collections::btree_map::Entry as BtreeEntry;
+use std;
+
+/// A hop in a route
+#[derive(Clone, PartialEq)]
+pub struct RouteHop {
+ /// The node_id of the node at this hop.
+ pub pubkey: PublicKey,
+ /// The channel that should be used from the previous hop to reach this node.
+ pub short_channel_id: u64,
+ /// The fee taken on this hop. For the last hop, this should be the full value of the payment.
+ pub fee_msat: u64,
+ /// The CLTV delta added for this hop. For the last hop, this should be the full CLTV value
+ /// expected at the destination, in excess of the current block height.
+ pub cltv_expiry_delta: u32,
+}
+
+/// A route from us through the network to a destination
+#[derive(Clone, PartialEq)]
+pub struct Route {
+ /// The list of hops, NOT INCLUDING our own, where the last hop is the destination. Thus, this
+ /// must always be at least length one. By protocol rules, this may not currently exceed 20 in
+ /// length.
+ pub hops: Vec<RouteHop>,
+}
+
+impl Writeable for Route {
+ fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ (self.hops.len() as u8).write(writer)?;
+ for hop in self.hops.iter() {
+ hop.pubkey.write(writer)?;
+ hop.short_channel_id.write(writer)?;
+ hop.fee_msat.write(writer)?;
+ hop.cltv_expiry_delta.write(writer)?;
+ }
+ Ok(())
+ }
+}
+
+impl<R: ::std::io::Read> Readable<R> for Route {
+ fn read(reader: &mut R) -> Result<Route, DecodeError> {
+ let hops_count: u8 = Readable::read(reader)?;
+ let mut hops = Vec::with_capacity(hops_count as usize);
+ for _ in 0..hops_count {
+ hops.push(RouteHop {
+ pubkey: Readable::read(reader)?,
+ short_channel_id: Readable::read(reader)?,
+ fee_msat: Readable::read(reader)?,
+ cltv_expiry_delta: Readable::read(reader)?,
+ });
+ }
+ Ok(Route {
+ hops
+ })
+ }
+}
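+
+// Round-trip sketch for the impls above (illustrative only; `VecWriter` here is
+// the fuzz harness's Vec-backed `Writer`, not something defined in this file):
+//
+//     let mut w = VecWriter(Vec::new());
+//     route.write(&mut w).unwrap();
+//     let read_back = Route::read(&mut ::std::io::Cursor::new(&w.0[..])).unwrap();
+//     assert!(read_back == route);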
+
+#[derive(PartialEq)]
+struct DirectionalChannelInfo {
+ src_node_id: PublicKey,
+ last_update: u32,
+ enabled: bool,
+ cltv_expiry_delta: u16,
+ htlc_minimum_msat: u64,
+ fee_base_msat: u32,
+ fee_proportional_millionths: u32,
+ last_update_message: Option<msgs::ChannelUpdate>,
+}
+
+impl std::fmt::Display for DirectionalChannelInfo {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
+ write!(f, "src_node_id {}, last_update {}, enabled {}, cltv_expiry_delta {}, htlc_minimum_msat {}, fee_base_msat {}, fee_proportional_millionths {}", log_pubkey!(self.src_node_id), self.last_update, self.enabled, self.cltv_expiry_delta, self.htlc_minimum_msat, self.fee_base_msat, self.fee_proportional_millionths)?;
+ Ok(())
+ }
+}
+
+impl_writeable!(DirectionalChannelInfo, 0, {
+ src_node_id,
+ last_update,
+ enabled,
+ cltv_expiry_delta,
+ htlc_minimum_msat,
+ fee_base_msat,
+ fee_proportional_millionths,
+ last_update_message
+});
+
+#[derive(PartialEq)]
+struct ChannelInfo {
+ features: GlobalFeatures,
+ one_to_two: DirectionalChannelInfo,
+ two_to_one: DirectionalChannelInfo,
+ //this is cached here so we can send it out later if required by route_init_sync
+ //keep an eye on this to see if the extra memory is a problem
+ announcement_message: Option<msgs::ChannelAnnouncement>,
+}
+
+impl std::fmt::Display for ChannelInfo {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
+ write!(f, "features: {}, one_to_two: {}, two_to_one: {}", log_bytes!(self.features.encode()), self.one_to_two, self.two_to_one)?;
+ Ok(())
+ }
+}
+
+impl_writeable!(ChannelInfo, 0, {
+ features,
+ one_to_two,
+ two_to_one,
+ announcement_message
+});
+
+#[derive(PartialEq)]
+struct NodeInfo {
+ #[cfg(feature = "non_bitcoin_chain_hash_routing")]
+ channels: Vec<(u64, Sha256dHash)>,
+ #[cfg(not(feature = "non_bitcoin_chain_hash_routing"))]
+ channels: Vec<u64>,
+
+ lowest_inbound_channel_fee_base_msat: u32,
+ lowest_inbound_channel_fee_proportional_millionths: u32,
+
+ features: GlobalFeatures,
+ last_update: u32,
+ rgb: [u8; 3],
+ alias: [u8; 32],
+ addresses: Vec<NetAddress>,
+ //this is cached here so we can send it out later if required by route_init_sync
+ //keep an eye on this to see if the extra memory is a problem
+ announcement_message: Option<msgs::NodeAnnouncement>,
+}
+
+impl std::fmt::Display for NodeInfo {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
+ write!(f, "features: {}, last_update: {}, lowest_inbound_channel_fee_base_msat: {}, lowest_inbound_channel_fee_proportional_millionths: {}, channels: {:?}", log_bytes!(self.features.encode()), self.last_update, self.lowest_inbound_channel_fee_base_msat, self.lowest_inbound_channel_fee_proportional_millionths, &self.channels[..])?;
+ Ok(())
+ }
+}
+
+impl Writeable for NodeInfo {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ (self.channels.len() as u64).write(writer)?;
+ for ref chan in self.channels.iter() {
+ chan.write(writer)?;
+ }
+ self.lowest_inbound_channel_fee_base_msat.write(writer)?;
+ self.lowest_inbound_channel_fee_proportional_millionths.write(writer)?;
+ self.features.write(writer)?;
+ self.last_update.write(writer)?;
+ self.rgb.write(writer)?;
+ self.alias.write(writer)?;
+ (self.addresses.len() as u64).write(writer)?;
+ for ref addr in &self.addresses {
+ addr.write(writer)?;
+ }
+ self.announcement_message.write(writer)?;
+ Ok(())
+ }
+}
+
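+// Cap the capacity we pre-allocate while deserializing: a corrupt or malicious
+// length prefix can claim up to 2^64 entries, so clamp the up-front allocation
+// and let the Vecs grow only as reads actually succeed.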
+const MAX_ALLOC_SIZE: u64 = 64*1024;
+
+impl<R: ::std::io::Read> Readable<R> for NodeInfo {
+ fn read(reader: &mut R) -> Result<NodeInfo, DecodeError> {
+ let channels_count: u64 = Readable::read(reader)?;
+ let mut channels = Vec::with_capacity(cmp::min(channels_count, MAX_ALLOC_SIZE / 8) as usize);
+ for _ in 0..channels_count {
+ channels.push(Readable::read(reader)?);
+ }
+ let lowest_inbound_channel_fee_base_msat = Readable::read(reader)?;
+ let lowest_inbound_channel_fee_proportional_millionths = Readable::read(reader)?;
+ let features = Readable::read(reader)?;
+ let last_update = Readable::read(reader)?;
+ let rgb = Readable::read(reader)?;
+ let alias = Readable::read(reader)?;
+ let addresses_count: u64 = Readable::read(reader)?;
+ let mut addresses = Vec::with_capacity(cmp::min(addresses_count, MAX_ALLOC_SIZE / 40) as usize);
+ for _ in 0..addresses_count {
+ match Readable::read(reader) {
+ Ok(Ok(addr)) => { addresses.push(addr); },
+ Ok(Err(_)) => return Err(DecodeError::InvalidValue),
+ Err(DecodeError::ShortRead) => return Err(DecodeError::BadLengthDescriptor),
+ _ => unreachable!(),
+ }
+ }
+ let announcement_message = Readable::read(reader)?;
+ Ok(NodeInfo {
+ channels,
+ lowest_inbound_channel_fee_base_msat,
+ lowest_inbound_channel_fee_proportional_millionths,
+ features,
+ last_update,
+ rgb,
+ alias,
+ addresses,
+ announcement_message
+ })
+ }
+}
+
+#[derive(PartialEq)]
+struct NetworkMap {
+ #[cfg(feature = "non_bitcoin_chain_hash_routing")]
+ channels: BTreeMap<(u64, Sha256dHash), ChannelInfo>,
+ #[cfg(not(feature = "non_bitcoin_chain_hash_routing"))]
+ channels: BTreeMap<u64, ChannelInfo>,
+
+ our_node_id: PublicKey,
+ nodes: BTreeMap<PublicKey, NodeInfo>,
+}
+
+impl Writeable for NetworkMap {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ (self.channels.len() as u64).write(writer)?;
+ for (ref chan_id, ref chan_info) in self.channels.iter() {
+ (*chan_id).write(writer)?;
+ chan_info.write(writer)?;
+ }
+ self.our_node_id.write(writer)?;
+ (self.nodes.len() as u64).write(writer)?;
+ for (ref node_id, ref node_info) in self.nodes.iter() {
+ node_id.write(writer)?;
+ node_info.write(writer)?;
+ }
+ Ok(())
+ }
+}
+
+impl<R: ::std::io::Read> Readable<R> for NetworkMap {
+ fn read(reader: &mut R) -> Result<NetworkMap, DecodeError> {
+ let channels_count: u64 = Readable::read(reader)?;
+ let mut channels = BTreeMap::new();
+ for _ in 0..channels_count {
+ let chan_id: u64 = Readable::read(reader)?;
+ let chan_info = Readable::read(reader)?;
+ channels.insert(chan_id, chan_info);
+ }
+ let our_node_id = Readable::read(reader)?;
+ let nodes_count: u64 = Readable::read(reader)?;
+ let mut nodes = BTreeMap::new();
+ for _ in 0..nodes_count {
+ let node_id = Readable::read(reader)?;
+ let node_info = Readable::read(reader)?;
+ nodes.insert(node_id, node_info);
+ }
+ Ok(NetworkMap {
+ channels,
+ our_node_id,
+ nodes,
+ })
+ }
+}
+
+struct MutNetworkMap<'a> {
+ #[cfg(feature = "non_bitcoin_chain_hash_routing")]
+ channels: &'a mut BTreeMap<(u64, Sha256dHash), ChannelInfo>,
+ #[cfg(not(feature = "non_bitcoin_chain_hash_routing"))]
+ channels: &'a mut BTreeMap<u64, ChannelInfo>,
+ nodes: &'a mut BTreeMap<PublicKey, NodeInfo>,
+}
+impl NetworkMap {
+ fn borrow_parts(&mut self) -> MutNetworkMap {
+ MutNetworkMap {
+ channels: &mut self.channels,
+ nodes: &mut self.nodes,
+ }
+ }
+}
+impl std::fmt::Display for NetworkMap {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
+ write!(f, "Node id {} network map\n[Channels]\n", log_pubkey!(self.our_node_id))?;
+ for (key, val) in self.channels.iter() {
+ write!(f, " {}: {}\n", key, val)?;
+ }
+ write!(f, "[Nodes]\n")?;
+ for (key, val) in self.nodes.iter() {
+ write!(f, " {}: {}\n", log_pubkey!(key), val)?;
+ }
+ Ok(())
+ }
+}
+
+impl NetworkMap {
+ #[cfg(feature = "non_bitcoin_chain_hash_routing")]
+ #[inline]
+ fn get_key(short_channel_id: u64, chain_hash: Sha256dHash) -> (u64, Sha256dHash) {
+ (short_channel_id, chain_hash)
+ }
+
+ #[cfg(not(feature = "non_bitcoin_chain_hash_routing"))]
+ #[inline]
+ fn get_key(short_channel_id: u64, _: Sha256dHash) -> u64 {
+ short_channel_id
+ }
+
+ #[cfg(feature = "non_bitcoin_chain_hash_routing")]
+ #[inline]
+ fn get_short_id(id: &(u64, Sha256dHash)) -> &u64 {
+ &id.0
+ }
+
+ #[cfg(not(feature = "non_bitcoin_chain_hash_routing"))]
+ #[inline]
+ fn get_short_id(id: &u64) -> &u64 {
+ id
+ }
+}
+
+/// A channel descriptor which provides a last-hop route to get_route
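+///
+/// For example, a payee with an unannounced channel could describe it in an
+/// invoice roughly like this (a sketch; the key and all field values here are
+/// assumptions, not constants from this crate):
+///
+/// ```ignore
+/// let hint = RouteHint {
+///     src_node_id: counterparty_pubkey, // the public end of the private channel
+///     short_channel_id: 0x1234567890,
+///     fee_base_msat: 1000,
+///     fee_proportional_millionths: 100,
+///     cltv_expiry_delta: 144,
+///     htlc_minimum_msat: 1,
+/// };
+/// ```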
+pub struct RouteHint {
+ /// The node_id of the non-target end of the route
+ pub src_node_id: PublicKey,
+ /// The short_channel_id of this channel
+ pub short_channel_id: u64,
+ /// The static msat-denominated fee which must be paid to use this channel
+ pub fee_base_msat: u32,
+ /// The dynamic proportional fee which must be paid to use this channel, denominated in
+ /// millionths of the value being forwarded to the next hop.
+ pub fee_proportional_millionths: u32,
+ /// The difference in CLTV values between this node and the next node.
+ pub cltv_expiry_delta: u16,
+ /// The minimum value, in msat, which must be relayed to the next hop.
+ pub htlc_minimum_msat: u64,
+}
+
+/// Tracks a view of the network, receiving updates from peers and generating Routes to
+/// payment destinations.
+pub struct Router {
+ secp_ctx: Secp256k1<secp256k1::VerifyOnly>,
+ network_map: RwLock<NetworkMap>,
+ chain_monitor: Arc<ChainWatchInterface>,
+ logger: Arc<Logger>,
+}
+
+const SERIALIZATION_VERSION: u8 = 1;
+const MIN_SERIALIZATION_VERSION: u8 = 1;
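+// Serialized Routers are rejected at read time if their written minimum version
+// exceeds the SERIALIZATION_VERSION this code understands (see `read` below).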
+
+impl Writeable for Router {
+ fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
+ writer.write_all(&[SERIALIZATION_VERSION; 1])?;
+ writer.write_all(&[MIN_SERIALIZATION_VERSION; 1])?;
+
+ let network = self.network_map.read().unwrap();
+ network.write(writer)?;
+ Ok(())
+ }
+}
+
+/// Arguments for the creation of a Router that are not deserialized.
+/// At a high level, the process for deserializing a Router and resuming normal operation is:
+/// 1) Deserialize the Router by filling in this struct and calling <Router>::read(reader, args).
+/// 2) Register the new Router with your ChainWatchInterface
+pub struct RouterReadArgs {
+ /// The ChainWatchInterface for use in the Router in the future.
+ ///
+ /// No calls to the ChainWatchInterface will be made during deserialization.
+ pub chain_monitor: Arc<ChainWatchInterface>,
+ /// The Logger for use in the Router and which may be used to log information during
+ /// deserialization.
+ pub logger: Arc<Logger>,
+}
+
+impl<R: ::std::io::Read> ReadableArgs<R, RouterReadArgs> for Router {
+ fn read(reader: &mut R, args: RouterReadArgs) -> Result<Router, DecodeError> {
+ let _ver: u8 = Readable::read(reader)?;
+ let min_ver: u8 = Readable::read(reader)?;
+ if min_ver > SERIALIZATION_VERSION {
+ return Err(DecodeError::UnknownVersion);
+ }
+ let network_map = Readable::read(reader)?;
+ Ok(Router {
+ secp_ctx: Secp256k1::verification_only(),
+ network_map: RwLock::new(network_map),
+ chain_monitor: args.chain_monitor,
+ logger: args.logger,
+ })
+ }
+}
+
+macro_rules! secp_verify_sig {
+ ( $secp_ctx: expr, $msg: expr, $sig: expr, $pubkey: expr ) => {
+ match $secp_ctx.verify($msg, $sig, $pubkey) {
+ Ok(_) => {},
- fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, HandleError> {
++ Err(_) => return Err(LightningError{err: "Invalid signature from remote node", action: ErrorAction::IgnoreError}),
+ }
+ };
+}
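+// Note the failure arm `return`s from the macro's *enclosing* function, so this
+// macro is only usable inside functions returning Result<_, LightningError>.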
+
+impl RoutingMessageHandler for Router {
- None => Err(HandleError{err: "No existing channels for node_announcement", action: Some(ErrorAction::IgnoreError)}),
++ fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
+ let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]);
+ secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.signature, &msg.contents.node_id);
+
+ if msg.contents.features.requires_unknown_bits() {
+ panic!("Unknown-required-features NodeAnnouncements should never deserialize!");
+ }
+
+ let mut network = self.network_map.write().unwrap();
+ match network.nodes.get_mut(&msg.contents.node_id) {
- return Err(HandleError{err: "Update older than last processed update", action: Some(ErrorAction::IgnoreError)});
++ None => Err(LightningError{err: "No existing channels for node_announcement", action: ErrorAction::IgnoreError}),
+ Some(node) => {
+ if node.last_update >= msg.contents.timestamp {
- fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result<bool, HandleError> {
++ return Err(LightningError{err: "Update older than last processed update", action: ErrorAction::IgnoreError});
+ }
+
+ node.features = msg.contents.features.clone();
+ node.last_update = msg.contents.timestamp;
+ node.rgb = msg.contents.rgb;
+ node.alias = msg.contents.alias;
+ node.addresses = msg.contents.addresses.clone();
+
+ let should_relay = msg.contents.excess_data.is_empty() && msg.contents.excess_address_data.is_empty() && !msg.contents.features.supports_unknown_bits();
+ node.announcement_message = if should_relay { Some(msg.clone()) } else { None };
+ Ok(should_relay)
+ }
+ }
+ }
+
- return Err(HandleError{err: "Channel announcement node had a channel with itself", action: Some(ErrorAction::IgnoreError)});
++ fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> {
+ if msg.contents.node_id_1 == msg.contents.node_id_2 || msg.contents.bitcoin_key_1 == msg.contents.bitcoin_key_2 {
- return Err(HandleError{err: "Channel announcement keys didn't match on-chain script", action: Some(ErrorAction::IgnoreError)});
++ return Err(LightningError{err: "Channel announcement node had a channel with itself", action: ErrorAction::IgnoreError});
+ }
+
+ let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]);
+ secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.node_signature_1, &msg.contents.node_id_1);
+ secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.node_signature_2, &msg.contents.node_id_2);
+ secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.bitcoin_signature_1, &msg.contents.bitcoin_key_1);
+ secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.bitcoin_signature_2, &msg.contents.bitcoin_key_2);
+
+ if msg.contents.features.requires_unknown_bits() {
+ panic!("Unknown-required-features ChannelAnnouncements should never deserialize!");
+ }
+
+ let checked_utxo = match self.chain_monitor.get_chain_utxo(msg.contents.chain_hash, msg.contents.short_channel_id) {
+ Ok((script_pubkey, _value)) => {
+ let expected_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2)
+ .push_slice(&msg.contents.bitcoin_key_1.serialize())
+ .push_slice(&msg.contents.bitcoin_key_2.serialize())
+ .push_opcode(opcodes::all::OP_PUSHNUM_2)
+ .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh();
+ if script_pubkey != expected_script {
- return Err(HandleError{err: "Channel announced on an unknown chain", action: Some(ErrorAction::IgnoreError)});
++ return Err(LightningError{err: "Channel announcement keys didn't match on-chain script", action: ErrorAction::IgnoreError});
+ }
+ //TODO: Check if value is worth storing, use it to inform routing, and compare it
+ //to the new HTLC max field in channel_update
+ true
+ },
+ Err(ChainError::NotSupported) => {
+ // Tentatively accept, potentially exposing us to DoS attacks
+ false
+ },
+ Err(ChainError::NotWatched) => {
- return Err(HandleError{err: "Channel announced without corresponding UTXO entry", action: Some(ErrorAction::IgnoreError)});
++ return Err(LightningError{err: "Channel announced on an unknown chain", action: ErrorAction::IgnoreError});
+ },
+ Err(ChainError::UnknownTx) => {
- return Err(HandleError{err: "Already have knowledge of channel", action: Some(ErrorAction::IgnoreError)})
++ return Err(LightningError{err: "Channel announced without corresponding UTXO entry", action: ErrorAction::IgnoreError});
+ },
+ };
+
+ let mut network_lock = self.network_map.write().unwrap();
+ let network = network_lock.borrow_parts();
+
+ let should_relay = msg.contents.excess_data.is_empty() && !msg.contents.features.supports_unknown_bits();
+
+ let chan_info = ChannelInfo {
+ features: msg.contents.features.clone(),
+ one_to_two: DirectionalChannelInfo {
+ src_node_id: msg.contents.node_id_1.clone(),
+ last_update: 0,
+ enabled: false,
+ cltv_expiry_delta: u16::max_value(),
+ htlc_minimum_msat: u64::max_value(),
+ fee_base_msat: u32::max_value(),
+ fee_proportional_millionths: u32::max_value(),
+ last_update_message: None,
+ },
+ two_to_one: DirectionalChannelInfo {
+ src_node_id: msg.contents.node_id_2.clone(),
+ last_update: 0,
+ enabled: false,
+ cltv_expiry_delta: u16::max_value(),
+ htlc_minimum_msat: u64::max_value(),
+ fee_base_msat: u32::max_value(),
+ fee_proportional_millionths: u32::max_value(),
+ last_update_message: None,
+ },
+ announcement_message: if should_relay { Some(msg.clone()) } else { None },
+ };
+
+ match network.channels.entry(NetworkMap::get_key(msg.contents.short_channel_id, msg.contents.chain_hash)) {
+ BtreeEntry::Occupied(mut entry) => {
+ //TODO: because asking the blockchain if short_channel_id is valid is only optional
+ //in the blockchain API, we need to handle it smartly here, though it's unclear
+ //exactly how...
+ if checked_utxo {
+ // Either our UTXO provider is busted, there was a reorg, or the UTXO provider
+ // only sometimes returns results. In any case remove the previous entry. Note
+ // that the spec expects us to "blacklist" the node_ids involved, but we can't
+ // do that because
+ // a) we don't *require* a UTXO provider that always returns results.
+ // b) we don't track UTXOs of channels we know about and remove them if they
+ // get reorg'd out.
+ // c) it's unclear how to do so without exposing ourselves to massive DoS risk.
+ Self::remove_channel_in_nodes(network.nodes, &entry.get(), msg.contents.short_channel_id);
+ *entry.get_mut() = chan_info;
+ } else {
- fn handle_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result<bool, HandleError> {
++ return Err(LightningError{err: "Already have knowledge of channel", action: ErrorAction::IgnoreError})
+ }
+ },
+ BtreeEntry::Vacant(entry) => {
+ entry.insert(chan_info);
+ }
+ };
+
+ macro_rules! add_channel_to_node {
+ ( $node_id: expr ) => {
+ match network.nodes.entry($node_id) {
+ BtreeEntry::Occupied(node_entry) => {
+ node_entry.into_mut().channels.push(NetworkMap::get_key(msg.contents.short_channel_id, msg.contents.chain_hash));
+ },
+ BtreeEntry::Vacant(node_entry) => {
+ node_entry.insert(NodeInfo {
+ channels: vec!(NetworkMap::get_key(msg.contents.short_channel_id, msg.contents.chain_hash)),
+ lowest_inbound_channel_fee_base_msat: u32::max_value(),
+ lowest_inbound_channel_fee_proportional_millionths: u32::max_value(),
+ features: GlobalFeatures::new(),
+ last_update: 0,
+ rgb: [0; 3],
+ alias: [0; 32],
+ addresses: Vec::new(),
+ announcement_message: None,
+ });
+ }
+ }
+ };
+ }
+
+ add_channel_to_node!(msg.contents.node_id_1);
+ add_channel_to_node!(msg.contents.node_id_2);
+
+ Ok(should_relay)
+ }
+
+ fn handle_htlc_fail_channel_update(&self, update: &msgs::HTLCFailChannelUpdate) {
+ match update {
+ &msgs::HTLCFailChannelUpdate::ChannelUpdateMessage { ref msg } => {
+ let _ = self.handle_channel_update(msg);
+ },
+ &msgs::HTLCFailChannelUpdate::ChannelClosed { ref short_channel_id, ref is_permanent } => {
+ let mut network = self.network_map.write().unwrap();
+ if *is_permanent {
+ if let Some(chan) = network.channels.remove(short_channel_id) {
+ Self::remove_channel_in_nodes(&mut network.nodes, &chan, *short_channel_id);
+ }
+ } else {
+ if let Some(chan) = network.channels.get_mut(short_channel_id) {
+ chan.one_to_two.enabled = false;
+ chan.two_to_one.enabled = false;
+ }
+ }
+ },
+ &msgs::HTLCFailChannelUpdate::NodeFailure { ref node_id, ref is_permanent } => {
+ if *is_permanent {
+ //TODO: Wholly remove the node
+ } else {
+ self.mark_node_bad(node_id, false);
+ }
+ },
+ }
+ }
+
- None => return Err(HandleError{err: "Couldn't find channel for update", action: Some(ErrorAction::IgnoreError)}),
++ fn handle_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> {
+ let mut network = self.network_map.write().unwrap();
+ let dest_node_id;
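+ // Per BOLT #7, bit 0 of `flags` encodes the channel direction and bit 1 the
+ // disable flag.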
+ let chan_enabled = msg.contents.flags & (1 << 1) != (1 << 1);
+ let chan_was_enabled;
+
+ match network.channels.get_mut(&NetworkMap::get_key(msg.contents.short_channel_id, msg.contents.chain_hash)) {
- return Err(HandleError{err: "Update older than last processed update", action: Some(ErrorAction::IgnoreError)});
++ None => return Err(LightningError{err: "Couldn't find channel for update", action: ErrorAction::IgnoreError}),
+ Some(channel) => {
+ macro_rules! maybe_update_channel_info {
+ ( $target: expr) => {
+ if $target.last_update >= msg.contents.timestamp {
- pub fn get_route(&self, target: &PublicKey, first_hops: Option<&[channelmanager::ChannelDetails]>, last_hops: &[RouteHint], final_value_msat: u64, final_cltv: u32) -> Result<Route, HandleError> {
++ return Err(LightningError{err: "Update older than last processed update", action: ErrorAction::IgnoreError});
+ }
+ chan_was_enabled = $target.enabled;
+ $target.last_update = msg.contents.timestamp;
+ $target.enabled = chan_enabled;
+ $target.cltv_expiry_delta = msg.contents.cltv_expiry_delta;
+ $target.htlc_minimum_msat = msg.contents.htlc_minimum_msat;
+ $target.fee_base_msat = msg.contents.fee_base_msat;
+ $target.fee_proportional_millionths = msg.contents.fee_proportional_millionths;
+ $target.last_update_message = if msg.contents.excess_data.is_empty() {
+ Some(msg.clone())
+ } else {
+ None
+ };
+ }
+ }
+ let msg_hash = hash_to_message!(&Sha256dHash::hash(&msg.contents.encode()[..])[..]);
+ if msg.contents.flags & 1 == 1 {
+ dest_node_id = channel.one_to_two.src_node_id.clone();
+ secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.signature, &channel.two_to_one.src_node_id);
+ maybe_update_channel_info!(channel.two_to_one);
+ } else {
+ dest_node_id = channel.two_to_one.src_node_id.clone();
+ secp_verify_sig!(self.secp_ctx, &msg_hash, &msg.signature, &channel.one_to_two.src_node_id);
+ maybe_update_channel_info!(channel.one_to_two);
+ }
+ }
+ }
+
+ if chan_enabled {
+ let node = network.nodes.get_mut(&dest_node_id).unwrap();
+ node.lowest_inbound_channel_fee_base_msat = cmp::min(node.lowest_inbound_channel_fee_base_msat, msg.contents.fee_base_msat);
+ node.lowest_inbound_channel_fee_proportional_millionths = cmp::min(node.lowest_inbound_channel_fee_proportional_millionths, msg.contents.fee_proportional_millionths);
+ } else if chan_was_enabled {
+ let mut lowest_inbound_channel_fee_base_msat = u32::max_value();
+ let mut lowest_inbound_channel_fee_proportional_millionths = u32::max_value();
+
+ {
+ let node = network.nodes.get(&dest_node_id).unwrap();
+
+ for chan_id in node.channels.iter() {
+ let chan = network.channels.get(chan_id).unwrap();
+ if chan.one_to_two.src_node_id == dest_node_id {
+ lowest_inbound_channel_fee_base_msat = cmp::min(lowest_inbound_channel_fee_base_msat, chan.two_to_one.fee_base_msat);
+ lowest_inbound_channel_fee_proportional_millionths = cmp::min(lowest_inbound_channel_fee_proportional_millionths, chan.two_to_one.fee_proportional_millionths);
+ } else {
+ lowest_inbound_channel_fee_base_msat = cmp::min(lowest_inbound_channel_fee_base_msat, chan.one_to_two.fee_base_msat);
+ lowest_inbound_channel_fee_proportional_millionths = cmp::min(lowest_inbound_channel_fee_proportional_millionths, chan.one_to_two.fee_proportional_millionths);
+ }
+ }
+ }
+
+ //TODO: satisfy the borrow-checker without a double-map-lookup :(
+ let mut_node = network.nodes.get_mut(&dest_node_id).unwrap();
+ mut_node.lowest_inbound_channel_fee_base_msat = lowest_inbound_channel_fee_base_msat;
+ mut_node.lowest_inbound_channel_fee_proportional_millionths = lowest_inbound_channel_fee_proportional_millionths;
+ }
+
+ Ok(msg.contents.excess_data.is_empty())
+ }
+
+
+ fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, msgs::ChannelUpdate,msgs::ChannelUpdate)> {
+ let mut result = Vec::with_capacity(batch_amount as usize);
+ let network = self.network_map.read().unwrap();
+ let mut iter = network.channels.range(starting_point..);
+ while result.len() < batch_amount as usize {
+ if let Some((_, ref chan)) = iter.next() {
+ if chan.announcement_message.is_some() &&
+ chan.one_to_two.last_update_message.is_some() &&
+ chan.two_to_one.last_update_message.is_some() {
+ result.push((chan.announcement_message.clone().unwrap(),
+ chan.one_to_two.last_update_message.clone().unwrap(),
+ chan.two_to_one.last_update_message.clone().unwrap()));
+ } else {
+ // TODO: We may end up sending un-announced channel_updates if we are sending
+ // initial sync data while receiving announce/updates for this channel.
+ }
+ } else {
+ return result;
+ }
+ }
+ result
+ }
+
+ fn get_next_node_announcements(&self, starting_point: Option<&PublicKey>, batch_amount: u8) -> Vec<msgs::NodeAnnouncement> {
+ let mut result = Vec::with_capacity(batch_amount as usize);
+ let network = self.network_map.read().unwrap();
+ let mut iter = if let Some(pubkey) = starting_point {
+ let mut iter = network.nodes.range((*pubkey)..);
+ iter.next();
+ iter
+ } else {
+ network.nodes.range(..)
+ };
+ while result.len() < batch_amount as usize {
+ if let Some((_, ref node)) = iter.next() {
+ if node.announcement_message.is_some() {
+ result.push(node.announcement_message.clone().unwrap());
+ }
+ } else {
+ return result;
+ }
+ }
+ result
+ }
+}
+
+#[derive(Eq, PartialEq)]
+struct RouteGraphNode {
+ pubkey: PublicKey,
+ lowest_fee_to_peer_through_node: u64,
+ lowest_fee_to_node: u64,
+}
+
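+// std's BinaryHeap is a max-heap, so this Ord is deliberately reversed (comparing
+// `other` to `self`) so pops yield the lowest-fee node first, as Dijkstra's requires.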
+impl cmp::Ord for RouteGraphNode {
+ fn cmp(&self, other: &RouteGraphNode) -> cmp::Ordering {
+ other.lowest_fee_to_peer_through_node.cmp(&self.lowest_fee_to_peer_through_node)
+ .then_with(|| other.pubkey.serialize().cmp(&self.pubkey.serialize()))
+ }
+}
+
+impl cmp::PartialOrd for RouteGraphNode {
+ fn partial_cmp(&self, other: &RouteGraphNode) -> Option<cmp::Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+struct DummyDirectionalChannelInfo {
+ src_node_id: PublicKey,
+ cltv_expiry_delta: u32,
+ htlc_minimum_msat: u64,
+ fee_base_msat: u32,
+ fee_proportional_millionths: u32,
+}
+
+impl Router {
+ /// Creates a new router with the given node_id to be used as the source for get_route()
+ pub fn new(our_pubkey: PublicKey, chain_monitor: Arc<ChainWatchInterface>, logger: Arc<Logger>) -> Router {
+ let mut nodes = BTreeMap::new();
+ nodes.insert(our_pubkey.clone(), NodeInfo {
+ channels: Vec::new(),
+ lowest_inbound_channel_fee_base_msat: u32::max_value(),
+ lowest_inbound_channel_fee_proportional_millionths: u32::max_value(),
+ features: GlobalFeatures::new(),
+ last_update: 0,
+ rgb: [0; 3],
+ alias: [0; 32],
+ addresses: Vec::new(),
+ announcement_message: None,
+ });
+ Router {
+ secp_ctx: Secp256k1::verification_only(),
+ network_map: RwLock::new(NetworkMap {
+ channels: BTreeMap::new(),
+ our_node_id: our_pubkey,
+ nodes: nodes,
+ }),
+ chain_monitor,
+ logger,
+ }
+ }
+
+ /// Dumps the entire network view of this Router to the logger provided in the constructor at
+ /// level Trace
+ pub fn trace_state(&self) {
+ log_trace!(self, "{}", self.network_map.read().unwrap());
+ }
+
+ /// Get network addresses by node id
+ pub fn get_addresses(&self, pubkey: &PublicKey) -> Option<Vec<NetAddress>> {
+ let network = self.network_map.read().unwrap();
+ network.nodes.get(pubkey).map(|n| n.addresses.clone())
+ }
+
+ /// Marks a node as having failed a route. This will avoid re-using the node in routes for now,
+ /// with an exponential decay in node "badness". Note that there is deliberately no
+ /// mark_channel_bad as a node may simply lie and suggest that an upstream channel from it is
+ /// what failed the route and not the node itself. Instead, setting the blamed_upstream_node
+ /// boolean will reduce the penalty, returning the node to usability faster. If the node is
+ /// behaving correctly, it will disable the failing channel and we will use it again next time.
+ pub fn mark_node_bad(&self, _node_id: &PublicKey, _blamed_upstream_node: bool) {
+ unimplemented!();
+ }
+
+ fn remove_channel_in_nodes(nodes: &mut BTreeMap<PublicKey, NodeInfo>, chan: &ChannelInfo, short_channel_id: u64) {
+ macro_rules! remove_from_node {
+ ($node_id: expr) => {
+ if let BtreeEntry::Occupied(mut entry) = nodes.entry($node_id) {
+ entry.get_mut().channels.retain(|chan_id| {
+ short_channel_id != *NetworkMap::get_short_id(chan_id)
+ });
+ if entry.get().channels.is_empty() {
+ entry.remove_entry();
+ }
+ } else {
+ panic!("Had channel that pointed to unknown node (ie inconsistent network map)!");
+ }
+ }
+ }
+ remove_from_node!(chan.one_to_two.src_node_id);
+ remove_from_node!(chan.two_to_one.src_node_id);
+ }
+
+ /// Gets a route from us to the given target node.
+ ///
+ /// Extra routing hops between known nodes and the target will be used if they are included in
+ /// last_hops.
+ ///
+ /// If some channels aren't announced, it may be useful to fill in a first_hops with the
+ /// results from a local ChannelManager::list_usable_channels() call. If it is filled in, our
+ /// (this Router's) view of our local channels will be ignored, and only those in first_hops
+ /// will be used.
+ ///
+ /// Panics if first_hops contains channels without short_channel_ids
+ /// (ChannelManager::list_usable_channels will never include such channels).
+ ///
+ /// The fees on channels from us to next-hops are ignored (as they are assumed to all be
+ /// equal), however the enabled/disabled bit on such channels as well as the htlc_minimum_msat
+ /// *is* checked as they may change based on the receiving node.
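+ ///
+ /// A minimal usage sketch (the `router`, `target` key, amount, and CLTV value
+ /// here are assumptions for illustration):
+ ///
+ /// ```ignore
+ /// // Route 10_000 msat to `target` with a final CLTV delta of 42 and no hints:
+ /// let route = router.get_route(&target, None, &[], 10_000, 42)?;
+ /// assert!(!route.hops.is_empty());
+ /// ```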
- return Err(HandleError{err: "Cannot generate a route to ourselves", action: None});
++ pub fn get_route(&self, target: &PublicKey, first_hops: Option<&[channelmanager::ChannelDetails]>, last_hops: &[RouteHint], final_value_msat: u64, final_cltv: u32) -> Result<Route, LightningError> {
+ // TODO: Obviously *only* using total fee cost sucks. We should consider weighting by
+ // uptime/success in using a node in the past.
+ let network = self.network_map.read().unwrap();
+
+ if *target == network.our_node_id {
- return Err(HandleError{err: "Cannot generate a route of more value than all existing satoshis", action: None});
++ return Err(LightningError{err: "Cannot generate a route to ourselves", action: ErrorAction::IgnoreError});
+ }
+
+ if final_value_msat > 21_000_000 * 100_000_000 * 1000 {
- return Err(HandleError{err: "Cannot route when there are no outbound routes away from us", action: None});
++ return Err(LightningError{err: "Cannot generate a route of more value than all existing satoshis", action: ErrorAction::IgnoreError});
+ }
+
+ // We do a dest-to-source Dijkstra's sorting by each node's distance from the destination
+ // plus the minimum per-HTLC fee to get from it to another node (aka "shitty A*").
+ // TODO: There are a few tweaks we could do, including possibly pre-calculating more stuff
+ // to use as the A* heuristic beyond just the cost to get one node further than the current
+ // one.
+
+ let dummy_directional_info = DummyDirectionalChannelInfo { // used for first_hops routes
+ src_node_id: network.our_node_id.clone(),
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ };
+
+ let mut targets = BinaryHeap::new(); //TODO: Do we care about switching to eg Fibonacci heap?
+ let mut dist = HashMap::with_capacity(network.nodes.len());
+
+ let mut first_hop_targets = HashMap::with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 });
+ if let Some(hops) = first_hops {
+ for chan in hops {
+ let short_channel_id = chan.short_channel_id.expect("first_hops should be filled in with usable channels, not pending ones");
+ if chan.remote_network_id == *target {
+ return Ok(Route {
+ hops: vec![RouteHop {
+ pubkey: chan.remote_network_id,
+ short_channel_id,
+ fee_msat: final_value_msat,
+ cltv_expiry_delta: final_cltv,
+ }],
+ });
+ }
+ first_hop_targets.insert(chan.remote_network_id, short_channel_id);
+ }
+ if first_hop_targets.is_empty() {
- None => return Err(HandleError{err: "Failed to find a non-fee-overflowing path to the given destination", action: None}),
++ return Err(LightningError{err: "Cannot route when there are no outbound routes away from us", action: ErrorAction::IgnoreError});
+ }
+ }
+
+ macro_rules! add_entry {
+ // Adds entry which goes from the node pointed to by $directional_info to
+ // $dest_node_id over the channel with id $chan_id with fees described in
+ // $directional_info.
+ ( $chan_id: expr, $dest_node_id: expr, $directional_info: expr, $starting_fee_msat: expr ) => {
+ //TODO: Explore simply adding fee to hit htlc_minimum_msat
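+ // BOLT #7 hop fee: fee_base_msat + amount * fee_proportional_millionths / 1_000_000.
+ // E.g. forwarding 100_000 msat with base 1_000 and proportional 100 costs
+ // 1_000 + 100_000 * 100 / 1_000_000 = 1_010 msat.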
+ if $starting_fee_msat as u64 + final_value_msat >= $directional_info.htlc_minimum_msat {
+ let proportional_fee_millions = ($starting_fee_msat + final_value_msat).checked_mul($directional_info.fee_proportional_millionths as u64);
+ if let Some(new_fee) = proportional_fee_millions.and_then(|part| {
+ ($directional_info.fee_base_msat as u64).checked_add(part / 1000000) })
+ {
+ let mut total_fee = $starting_fee_msat as u64;
+ let hm_entry = dist.entry(&$directional_info.src_node_id);
+ let old_entry = hm_entry.or_insert_with(|| {
+ let node = network.nodes.get(&$directional_info.src_node_id).unwrap();
+ (u64::max_value(),
+ node.lowest_inbound_channel_fee_base_msat,
+ node.lowest_inbound_channel_fee_proportional_millionths,
+ RouteHop {
+ pubkey: $dest_node_id.clone(),
+ short_channel_id: 0,
+ fee_msat: 0,
+ cltv_expiry_delta: 0,
+ })
+ });
+ if $directional_info.src_node_id != network.our_node_id {
+ // Ignore new_fee for channel-from-us as we assume all channels-from-us
+ // will have the same effective-fee
+ total_fee += new_fee;
+ if let Some(fee_inc) = final_value_msat.checked_add(total_fee).and_then(|inc| { (old_entry.2 as u64).checked_mul(inc) }) {
+ total_fee += fee_inc / 1000000 + (old_entry.1 as u64);
+ } else {
+ // max_value means we'll always fail the old_entry.0 > total_fee check
+ total_fee = u64::max_value();
+ }
+ }
+ let new_graph_node = RouteGraphNode {
+ pubkey: $directional_info.src_node_id,
+ lowest_fee_to_peer_through_node: total_fee,
+ lowest_fee_to_node: $starting_fee_msat as u64 + new_fee,
+ };
+ if old_entry.0 > total_fee {
+ targets.push(new_graph_node);
+ old_entry.0 = total_fee;
+ old_entry.3 = RouteHop {
+ pubkey: $dest_node_id.clone(),
+ short_channel_id: $chan_id.clone(),
+ fee_msat: new_fee, // This field is ignored on the last-hop anyway
+ cltv_expiry_delta: $directional_info.cltv_expiry_delta as u32,
+ }
+ }
+ }
+ }
+ };
+ }
+
+ macro_rules! add_entries_to_cheapest_to_target_node {
+ ( $node: expr, $node_id: expr, $fee_to_target_msat: expr ) => {
+ if first_hops.is_some() {
+ if let Some(first_hop) = first_hop_targets.get(&$node_id) {
+ add_entry!(first_hop, $node_id, dummy_directional_info, $fee_to_target_msat);
+ }
+ }
+
+ for chan_id in $node.channels.iter() {
+ let chan = network.channels.get(chan_id).unwrap();
+ if chan.one_to_two.src_node_id == *$node_id {
+ // i.e. $node is node one, so the next hop in A* is node two, via the two_to_one channel
+ if first_hops.is_none() || chan.two_to_one.src_node_id != network.our_node_id {
+ if chan.two_to_one.enabled {
+ add_entry!(chan_id, chan.one_to_two.src_node_id, chan.two_to_one, $fee_to_target_msat);
+ }
+ }
+ } else {
+ if first_hops.is_none() || chan.one_to_two.src_node_id != network.our_node_id {
+ if chan.one_to_two.enabled {
+ add_entry!(chan_id, chan.two_to_one.src_node_id, chan.one_to_two, $fee_to_target_msat);
+ }
+ }
+ }
+ }
+ };
+ }
+
+ match network.nodes.get(target) {
+ None => {},
+ Some(node) => {
+ add_entries_to_cheapest_to_target_node!(node, target, 0);
+ },
+ }
+
+ for hop in last_hops.iter() {
+ if first_hops.is_none() || hop.src_node_id != network.our_node_id { // first_hop overrules last_hops
+ if network.nodes.get(&hop.src_node_id).is_some() {
+ if first_hops.is_some() {
+ if let Some(first_hop) = first_hop_targets.get(&hop.src_node_id) {
+ add_entry!(first_hop, hop.src_node_id, dummy_directional_info, 0);
+ }
+ }
+ add_entry!(hop.short_channel_id, target, hop, 0);
+ }
+ }
+ }
+
+ while let Some(RouteGraphNode { pubkey, lowest_fee_to_node, .. }) = targets.pop() {
+ if pubkey == network.our_node_id {
+ let mut res = vec!(dist.remove(&network.our_node_id).unwrap().3);
+ while res.last().unwrap().pubkey != *target {
+ let new_entry = match dist.remove(&res.last().unwrap().pubkey) {
+ Some(hop) => hop.3,
- Err(HandleError{err: "Failed to find a path to the given destination", action: None})
++ None => return Err(LightningError{err: "Failed to find a non-fee-overflowing path to the given destination", action: ErrorAction::IgnoreError}),
+ };
+ res.last_mut().unwrap().fee_msat = new_entry.fee_msat;
+ res.last_mut().unwrap().cltv_expiry_delta = new_entry.cltv_expiry_delta;
+ res.push(new_entry);
+ }
+ res.last_mut().unwrap().fee_msat = final_value_msat;
+ res.last_mut().unwrap().cltv_expiry_delta = final_cltv;
+ let route = Route { hops: res };
+ log_trace!(self, "Got route: {}", log_route!(route));
+ return Ok(route);
+ }
+
+ match network.nodes.get(&pubkey) {
+ None => {},
+ Some(node) => {
+ add_entries_to_cheapest_to_target_node!(node, &pubkey, lowest_fee_to_node);
+ },
+ }
+ }
+
++ Err(LightningError{err: "Failed to find a path to the given destination", action: ErrorAction::IgnoreError})
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use chain::chaininterface;
+ use ln::channelmanager;
+ use ln::router::{Router,NodeInfo,NetworkMap,ChannelInfo,DirectionalChannelInfo,RouteHint};
+ use ln::msgs::GlobalFeatures;
+ use util::test_utils;
+ use util::test_utils::TestVecWriter;
+ use util::logger::Logger;
+ use util::ser::{Writeable, Readable};
+
+ use bitcoin_hashes::sha256d::Hash as Sha256dHash;
+ use bitcoin_hashes::Hash;
+ use bitcoin::network::constants::Network;
+
+ use hex;
+
+ use secp256k1::key::{PublicKey,SecretKey};
+ use secp256k1::Secp256k1;
+
+ use std::sync::Arc;
+
+ #[test]
+ fn route_test() {
+ let secp_ctx = Secp256k1::new();
+ let our_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()[..]).unwrap());
+ let logger: Arc<Logger> = Arc::new(test_utils::TestLogger::new());
+ let chain_monitor = Arc::new(chaininterface::ChainWatchInterfaceUtil::new(Network::Testnet, Arc::clone(&logger)));
+ let router = Router::new(our_id, chain_monitor, Arc::clone(&logger));
+
+ // Build network from our_id to node8:
+ //
+ // -1(1)2- node1 -1(3)2-
+ // / \
+ // our_id -1(12)2- node8 -1(13)2--- node3
+ // \ /
+ // -1(2)2- node2 -1(4)2-
+ //
+ //
+ // chan1 1-to-2: disabled
+ // chan1 2-to-1: enabled, 0 fee
+ //
+ // chan2 1-to-2: enabled, ignored fee
+ // chan2 2-to-1: enabled, 0 fee
+ //
+ // chan3 1-to-2: enabled, 0 fee
+ // chan3 2-to-1: enabled, 100 msat fee
+ //
+ // chan4 1-to-2: enabled, 100% fee
+ // chan4 2-to-1: enabled, 0 fee
+ //
+ // chan12 1-to-2: enabled, ignored fee
+ // chan12 2-to-1: enabled, 0 fee
+ //
+ // chan13 1-to-2: enabled, 200% fee
+ // chan13 2-to-1: enabled, 0 fee
+ //
+ //
+ // -1(5)2- node4 -1(8)2--
+ // | 2 |
+ // | (11) |
+ // / 1 \
+ // node3--1(6)2- node5 -1(9)2--- node7 (not in global route map)
+ // \ /
+ // -1(7)2- node6 -1(10)2-
+ //
+ // chan5 1-to-2: enabled, 100 msat fee
+ // chan5 2-to-1: enabled, 0 fee
+ //
+ // chan6 1-to-2: enabled, 0 fee
+ // chan6 2-to-1: enabled, 0 fee
+ //
+ // chan7 1-to-2: enabled, 100% fee
+ // chan7 2-to-1: enabled, 0 fee
+ //
+ // chan8 1-to-2: enabled, variable fee (0 then 1000 msat)
+ // chan8 2-to-1: enabled, 0 fee
+ //
+ // chan9 1-to-2: enabled, 1001 msat fee
+ // chan9 2-to-1: enabled, 0 fee
+ //
+ // chan10 1-to-2: enabled, 0 fee
+ // chan10 2-to-1: enabled, 0 fee
+ //
+ // chan11 1-to-2: enabled, 0 fee
+ // chan11 2-to-1: enabled, 0 fee
+
+ let node1 = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0202020202020202020202020202020202020202020202020202020202020202").unwrap()[..]).unwrap());
+ let node2 = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0303030303030303030303030303030303030303030303030303030303030303").unwrap()[..]).unwrap());
+ let node3 = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0404040404040404040404040404040404040404040404040404040404040404").unwrap()[..]).unwrap());
+ let node4 = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()[..]).unwrap());
+ let node5 = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0606060606060606060606060606060606060606060606060606060606060606").unwrap()[..]).unwrap());
+ let node6 = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0707070707070707070707070707070707070707070707070707070707070707").unwrap()[..]).unwrap());
+ let node7 = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0808080808080808080808080808080808080808080808080808080808080808").unwrap()[..]).unwrap());
+ let node8 = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&hex::decode("0909090909090909090909090909090909090909090909090909090909090909").unwrap()[..]).unwrap());
+
+ let zero_hash = Sha256dHash::hash(&[0; 32]);
+
+ {
+ let mut network = router.network_map.write().unwrap();
+
+ network.nodes.insert(node1.clone(), NodeInfo {
+ channels: vec!(NetworkMap::get_key(1, zero_hash.clone()), NetworkMap::get_key(3, zero_hash.clone())),
+ lowest_inbound_channel_fee_base_msat: 100,
+ lowest_inbound_channel_fee_proportional_millionths: 0,
+ features: GlobalFeatures::new(),
+ last_update: 1,
+ rgb: [0; 3],
+ alias: [0; 32],
+ addresses: Vec::new(),
+ announcement_message: None,
+ });
+ network.channels.insert(NetworkMap::get_key(1, zero_hash.clone()), ChannelInfo {
+ features: GlobalFeatures::new(),
+ one_to_two: DirectionalChannelInfo {
+ src_node_id: our_id.clone(),
+ last_update: 0,
+ enabled: false,
+ cltv_expiry_delta: u16::max_value(), // This value should be ignored
+ htlc_minimum_msat: 0,
+ fee_base_msat: u32::max_value(), // This value should be ignored
+ fee_proportional_millionths: u32::max_value(), // This value should be ignored
+ last_update_message: None,
+ }, two_to_one: DirectionalChannelInfo {
+ src_node_id: node1.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ },
+ announcement_message: None,
+ });
+ network.nodes.insert(node2.clone(), NodeInfo {
+ channels: vec!(NetworkMap::get_key(2, zero_hash.clone()), NetworkMap::get_key(4, zero_hash.clone())),
+ lowest_inbound_channel_fee_base_msat: 0,
+ lowest_inbound_channel_fee_proportional_millionths: 0,
+ features: GlobalFeatures::new(),
+ last_update: 1,
+ rgb: [0; 3],
+ alias: [0; 32],
+ addresses: Vec::new(),
+ announcement_message: None,
+ });
+ network.channels.insert(NetworkMap::get_key(2, zero_hash.clone()), ChannelInfo {
+ features: GlobalFeatures::new(),
+ one_to_two: DirectionalChannelInfo {
+ src_node_id: our_id.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: u16::max_value(), // This value should be ignored
+ htlc_minimum_msat: 0,
+ fee_base_msat: u32::max_value(), // This value should be ignored
+ fee_proportional_millionths: u32::max_value(), // This value should be ignored
+ last_update_message: None,
+ }, two_to_one: DirectionalChannelInfo {
+ src_node_id: node2.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ },
+ announcement_message: None,
+ });
+ network.nodes.insert(node8.clone(), NodeInfo {
+ channels: vec!(NetworkMap::get_key(12, zero_hash.clone()), NetworkMap::get_key(13, zero_hash.clone())),
+ lowest_inbound_channel_fee_base_msat: 0,
+ lowest_inbound_channel_fee_proportional_millionths: 0,
+ features: GlobalFeatures::new(),
+ last_update: 1,
+ rgb: [0; 3],
+ alias: [0; 32],
+ addresses: Vec::new(),
+ announcement_message: None,
+ });
+ network.channels.insert(NetworkMap::get_key(12, zero_hash.clone()), ChannelInfo {
+ features: GlobalFeatures::new(),
+ one_to_two: DirectionalChannelInfo {
+ src_node_id: our_id.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: u16::max_value(), // This value should be ignored
+ htlc_minimum_msat: 0,
+ fee_base_msat: u32::max_value(), // This value should be ignored
+ fee_proportional_millionths: u32::max_value(), // This value should be ignored
+ last_update_message: None,
+ }, two_to_one: DirectionalChannelInfo {
+ src_node_id: node8.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ },
+ announcement_message: None,
+ });
+ network.nodes.insert(node3.clone(), NodeInfo {
+ channels: vec!(
+ NetworkMap::get_key(3, zero_hash.clone()),
+ NetworkMap::get_key(4, zero_hash.clone()),
+ NetworkMap::get_key(13, zero_hash.clone()),
+ NetworkMap::get_key(5, zero_hash.clone()),
+ NetworkMap::get_key(6, zero_hash.clone()),
+ NetworkMap::get_key(7, zero_hash.clone())),
+ lowest_inbound_channel_fee_base_msat: 0,
+ lowest_inbound_channel_fee_proportional_millionths: 0,
+ features: GlobalFeatures::new(),
+ last_update: 1,
+ rgb: [0; 3],
+ alias: [0; 32],
+ addresses: Vec::new(),
+ announcement_message: None,
+ });
+ network.channels.insert(NetworkMap::get_key(3, zero_hash.clone()), ChannelInfo {
+ features: GlobalFeatures::new(),
+ one_to_two: DirectionalChannelInfo {
+ src_node_id: node1.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (3 << 8) | 1,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ }, two_to_one: DirectionalChannelInfo {
+ src_node_id: node3.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (3 << 8) | 2,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 100,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ },
+ announcement_message: None,
+ });
+ network.channels.insert(NetworkMap::get_key(4, zero_hash.clone()), ChannelInfo {
+ features: GlobalFeatures::new(),
+ one_to_two: DirectionalChannelInfo {
+ src_node_id: node2.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (4 << 8) | 1,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 1000000,
+ last_update_message: None,
+ }, two_to_one: DirectionalChannelInfo {
+ src_node_id: node3.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (4 << 8) | 2,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ },
+ announcement_message: None,
+ });
+ network.channels.insert(NetworkMap::get_key(13, zero_hash.clone()), ChannelInfo {
+ features: GlobalFeatures::new(),
+ one_to_two: DirectionalChannelInfo {
+ src_node_id: node8.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (13 << 8) | 1,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 2000000,
+ last_update_message: None,
+ }, two_to_one: DirectionalChannelInfo {
+ src_node_id: node3.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (13 << 8) | 2,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ },
+ announcement_message: None,
+ });
+ network.nodes.insert(node4.clone(), NodeInfo {
+ channels: vec!(NetworkMap::get_key(5, zero_hash.clone()), NetworkMap::get_key(11, zero_hash.clone())),
+ lowest_inbound_channel_fee_base_msat: 0,
+ lowest_inbound_channel_fee_proportional_millionths: 0,
+ features: GlobalFeatures::new(),
+ last_update: 1,
+ rgb: [0; 3],
+ alias: [0; 32],
+ addresses: Vec::new(),
+ announcement_message: None,
+ });
+ network.channels.insert(NetworkMap::get_key(5, zero_hash.clone()), ChannelInfo {
+ features: GlobalFeatures::new(),
+ one_to_two: DirectionalChannelInfo {
+ src_node_id: node3.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (5 << 8) | 1,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 100,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ }, two_to_one: DirectionalChannelInfo {
+ src_node_id: node4.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (5 << 8) | 2,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ },
+ announcement_message: None,
+ });
+ network.nodes.insert(node5.clone(), NodeInfo {
+ channels: vec!(NetworkMap::get_key(6, zero_hash.clone()), NetworkMap::get_key(11, zero_hash.clone())),
+ lowest_inbound_channel_fee_base_msat: 0,
+ lowest_inbound_channel_fee_proportional_millionths: 0,
+ features: GlobalFeatures::new(),
+ last_update: 1,
+ rgb: [0; 3],
+ alias: [0; 32],
+ addresses: Vec::new(),
+ announcement_message: None,
+ });
+ network.channels.insert(NetworkMap::get_key(6, zero_hash.clone()), ChannelInfo {
+ features: GlobalFeatures::new(),
+ one_to_two: DirectionalChannelInfo {
+ src_node_id: node3.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (6 << 8) | 1,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ }, two_to_one: DirectionalChannelInfo {
+ src_node_id: node5.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (6 << 8) | 2,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ },
+ announcement_message: None,
+ });
+ network.channels.insert(NetworkMap::get_key(11, zero_hash.clone()), ChannelInfo {
+ features: GlobalFeatures::new(),
+ one_to_two: DirectionalChannelInfo {
+ src_node_id: node5.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (11 << 8) | 1,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ }, two_to_one: DirectionalChannelInfo {
+ src_node_id: node4.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (11 << 8) | 2,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ },
+ announcement_message: None,
+ });
+ network.nodes.insert(node6.clone(), NodeInfo {
+ channels: vec!(NetworkMap::get_key(7, zero_hash.clone())),
+ lowest_inbound_channel_fee_base_msat: 0,
+ lowest_inbound_channel_fee_proportional_millionths: 0,
+ features: GlobalFeatures::new(),
+ last_update: 1,
+ rgb: [0; 3],
+ alias: [0; 32],
+ addresses: Vec::new(),
+ announcement_message: None,
+ });
+ network.channels.insert(NetworkMap::get_key(7, zero_hash.clone()), ChannelInfo {
+ features: GlobalFeatures::new(),
+ one_to_two: DirectionalChannelInfo {
+ src_node_id: node3.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (7 << 8) | 1,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 1000000,
+ last_update_message: None,
+ }, two_to_one: DirectionalChannelInfo {
+ src_node_id: node6.clone(),
+ last_update: 0,
+ enabled: true,
+ cltv_expiry_delta: (7 << 8) | 2,
+ htlc_minimum_msat: 0,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ last_update_message: None,
+ },
+ announcement_message: None,
+ });
+ }
+
+ { // Simple route to 3 via 2
+ let route = router.get_route(&node3, None, &Vec::new(), 100, 42).unwrap();
+ assert_eq!(route.hops.len(), 2);
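+ // node2 charges 100% (1_000_000 ppm) to forward over chan 4, so delivering 100 msat to
+ // node3 costs 100 msat at the first hop; the last hop's fee_msat is the amount delivered.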
+
+ assert_eq!(route.hops[0].pubkey, node2);
+ assert_eq!(route.hops[0].short_channel_id, 2);
+ assert_eq!(route.hops[0].fee_msat, 100);
+ assert_eq!(route.hops[0].cltv_expiry_delta, (4 << 8) | 1);
+
+ assert_eq!(route.hops[1].pubkey, node3);
+ assert_eq!(route.hops[1].short_channel_id, 4);
+ assert_eq!(route.hops[1].fee_msat, 100);
+ assert_eq!(route.hops[1].cltv_expiry_delta, 42);
+ }
+
+ { // Route to 1 via 2 and 3 because our channel to 1 is disabled
+ let route = router.get_route(&node1, None, &Vec::new(), 100, 42).unwrap();
+ assert_eq!(route.hops.len(), 3);
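+ // Delivering 100 msat to node1 pays chan 3's 100 msat base fee at node3; node2 then
+ // charges 100% of the resulting 200 msat to forward over chan 4.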
+
+ assert_eq!(route.hops[0].pubkey, node2);
+ assert_eq!(route.hops[0].short_channel_id, 2);
+ assert_eq!(route.hops[0].fee_msat, 200);
+ assert_eq!(route.hops[0].cltv_expiry_delta, (4 << 8) | 1);
+
+ assert_eq!(route.hops[1].pubkey, node3);
+ assert_eq!(route.hops[1].short_channel_id, 4);
+ assert_eq!(route.hops[1].fee_msat, 100);
+ assert_eq!(route.hops[1].cltv_expiry_delta, (3 << 8) | 2);
+
+ assert_eq!(route.hops[2].pubkey, node1);
+ assert_eq!(route.hops[2].short_channel_id, 3);
+ assert_eq!(route.hops[2].fee_msat, 100);
+ assert_eq!(route.hops[2].cltv_expiry_delta, 42);
+ }
+
+ { // If we specify a channel to node8, it overrides our local channel view and is used
+ let our_chans = vec![channelmanager::ChannelDetails {
+ channel_id: [0; 32],
+ short_channel_id: Some(42),
+ remote_network_id: node8.clone(),
+ channel_value_satoshis: 0,
+ user_id: 0,
+ outbound_capacity_msat: 0,
+ inbound_capacity_msat: 0,
+ is_live: true,
+ }];
+ let route = router.get_route(&node3, Some(&our_chans), &Vec::new(), 100, 42).unwrap();
+ assert_eq!(route.hops.len(), 2);
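+ // The specified channel takes us straight to node8, which then charges its 200%
+ // (2_000_000 ppm) chan 13 fee on the 100 msat delivered, hence 200 msat at the first hop.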
+
+ assert_eq!(route.hops[0].pubkey, node8);
+ assert_eq!(route.hops[0].short_channel_id, 42);
+ assert_eq!(route.hops[0].fee_msat, 200);
+ assert_eq!(route.hops[0].cltv_expiry_delta, (13 << 8) | 1);
+
+ assert_eq!(route.hops[1].pubkey, node3);
+ assert_eq!(route.hops[1].short_channel_id, 13);
+ assert_eq!(route.hops[1].fee_msat, 100);
+ assert_eq!(route.hops[1].cltv_expiry_delta, 42);
+ }
+
+ let mut last_hops = vec!(RouteHint {
+ src_node_id: node4.clone(),
+ short_channel_id: 8,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ cltv_expiry_delta: (8 << 8) | 1,
+ htlc_minimum_msat: 0,
+ }, RouteHint {
+ src_node_id: node5.clone(),
+ short_channel_id: 9,
+ fee_base_msat: 1001,
+ fee_proportional_millionths: 0,
+ cltv_expiry_delta: (9 << 8) | 1,
+ htlc_minimum_msat: 0,
+ }, RouteHint {
+ src_node_id: node6.clone(),
+ short_channel_id: 10,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ cltv_expiry_delta: (10 << 8) | 1,
+ htlc_minimum_msat: 0,
+ });
+
+ { // Simple test across 2, 3, 5, and 4 via a last_hop channel
+ let route = router.get_route(&node7, None, &last_hops, 100, 42).unwrap();
+ assert_eq!(route.hops.len(), 5);
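+ // Chans 6 and 11 and the hinted chan 8 are all free, so the only fee is node2's 100%
+ // proportional charge on the 100 msat forwarded over chan 4.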
+
+ assert_eq!(route.hops[0].pubkey, node2);
+ assert_eq!(route.hops[0].short_channel_id, 2);
+ assert_eq!(route.hops[0].fee_msat, 100);
+ assert_eq!(route.hops[0].cltv_expiry_delta, (4 << 8) | 1);
+
+ assert_eq!(route.hops[1].pubkey, node3);
+ assert_eq!(route.hops[1].short_channel_id, 4);
+ assert_eq!(route.hops[1].fee_msat, 0);
+ assert_eq!(route.hops[1].cltv_expiry_delta, (6 << 8) | 1);
+
+ assert_eq!(route.hops[2].pubkey, node5);
+ assert_eq!(route.hops[2].short_channel_id, 6);
+ assert_eq!(route.hops[2].fee_msat, 0);
+ assert_eq!(route.hops[2].cltv_expiry_delta, (11 << 8) | 1);
+
+ assert_eq!(route.hops[3].pubkey, node4);
+ assert_eq!(route.hops[3].short_channel_id, 11);
+ assert_eq!(route.hops[3].fee_msat, 0);
+ assert_eq!(route.hops[3].cltv_expiry_delta, (8 << 8) | 1);
+
+ assert_eq!(route.hops[4].pubkey, node7);
+ assert_eq!(route.hops[4].short_channel_id, 8);
+ assert_eq!(route.hops[4].fee_msat, 100);
+ assert_eq!(route.hops[4].cltv_expiry_delta, 42);
+ }
+
+ { // Simple test with outbound channel to 4 to test that last_hops and first_hops connect
+ let our_chans = vec![channelmanager::ChannelDetails {
+ channel_id: [0; 32],
+ short_channel_id: Some(42),
+ remote_network_id: node4.clone(),
+ channel_value_satoshis: 0,
+ user_id: 0,
+ outbound_capacity_msat: 0,
+ inbound_capacity_msat: 0,
+ is_live: true,
+ }];
+ let route = router.get_route(&node7, Some(&our_chans), &last_hops, 100, 42).unwrap();
+ assert_eq!(route.hops.len(), 2);
+
+ assert_eq!(route.hops[0].pubkey, node4);
+ assert_eq!(route.hops[0].short_channel_id, 42);
+ assert_eq!(route.hops[0].fee_msat, 0);
+ assert_eq!(route.hops[0].cltv_expiry_delta, (8 << 8) | 1);
+
+ assert_eq!(route.hops[1].pubkey, node7);
+ assert_eq!(route.hops[1].short_channel_id, 8);
+ assert_eq!(route.hops[1].fee_msat, 100);
+ assert_eq!(route.hops[1].cltv_expiry_delta, 42);
+ }
+
+ last_hops[0].fee_base_msat = 1000;
+
+ { // Revert to via 6 as the fee on 8 goes up
+ let route = router.get_route(&node7, None, &last_hops, 100, 42).unwrap();
+ assert_eq!(route.hops.len(), 4);
+
+ assert_eq!(route.hops[0].pubkey, node2);
+ assert_eq!(route.hops[0].short_channel_id, 2);
+ assert_eq!(route.hops[0].fee_msat, 200); // fee increased as it is a percentage of the value transferred through the node
+ assert_eq!(route.hops[0].cltv_expiry_delta, (4 << 8) | 1);
+
+ assert_eq!(route.hops[1].pubkey, node3);
+ assert_eq!(route.hops[1].short_channel_id, 4);
+ assert_eq!(route.hops[1].fee_msat, 100);
+ assert_eq!(route.hops[1].cltv_expiry_delta, (7 << 8) | 1);
+
+ assert_eq!(route.hops[2].pubkey, node6);
+ assert_eq!(route.hops[2].short_channel_id, 7);
+ assert_eq!(route.hops[2].fee_msat, 0);
+ assert_eq!(route.hops[2].cltv_expiry_delta, (10 << 8) | 1);
+
+ assert_eq!(route.hops[3].pubkey, node7);
+ assert_eq!(route.hops[3].short_channel_id, 10);
+ assert_eq!(route.hops[3].fee_msat, 100);
+ assert_eq!(route.hops[3].cltv_expiry_delta, 42);
+ }
+
+ { // ...but still use 8 for larger payments, as the route via 6 pays chan 7's 100% proportional fee
+ let route = router.get_route(&node7, None, &last_hops, 2000, 42).unwrap();
+ assert_eq!(route.hops.len(), 5);
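+ // Delivering 2000 msat via the hinted chan 8 pays its 1000 msat base fee at node4;
+ // node2 then charges 100% of the 3000 msat it forwards over chan 4.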
+
+ assert_eq!(route.hops[0].pubkey, node2);
+ assert_eq!(route.hops[0].short_channel_id, 2);
+ assert_eq!(route.hops[0].fee_msat, 3000);
+ assert_eq!(route.hops[0].cltv_expiry_delta, (4 << 8) | 1);
+
+ assert_eq!(route.hops[1].pubkey, node3);
+ assert_eq!(route.hops[1].short_channel_id, 4);
+ assert_eq!(route.hops[1].fee_msat, 0);
+ assert_eq!(route.hops[1].cltv_expiry_delta, (6 << 8) | 1);
+
+ assert_eq!(route.hops[2].pubkey, node5);
+ assert_eq!(route.hops[2].short_channel_id, 6);
+ assert_eq!(route.hops[2].fee_msat, 0);
+ assert_eq!(route.hops[2].cltv_expiry_delta, (11 << 8) | 1);
+
+ assert_eq!(route.hops[3].pubkey, node4);
+ assert_eq!(route.hops[3].short_channel_id, 11);
+ assert_eq!(route.hops[3].fee_msat, 1000);
+ assert_eq!(route.hops[3].cltv_expiry_delta, (8 << 8) | 1);
+
+ assert_eq!(route.hops[4].pubkey, node7);
+ assert_eq!(route.hops[4].short_channel_id, 8);
+ assert_eq!(route.hops[4].fee_msat, 2000);
+ assert_eq!(route.hops[4].cltv_expiry_delta, 42);
+ }
+
+ { // Test Router serialization/deserialization
+ let mut w = TestVecWriter(Vec::new());
+ let network = router.network_map.read().unwrap();
+ assert!(!network.channels.is_empty());
+ assert!(!network.nodes.is_empty());
+ network.write(&mut w).unwrap();
+ assert!(<NetworkMap>::read(&mut ::std::io::Cursor::new(&w.0)).unwrap() == *network);
+ }
+ }
+}
--- /dev/null
- action: Option<msgs::ErrorAction>
+//! Events are returned from various bits in the library which indicate some action must be taken
+//! by the client.
+//!
+//! Because we don't have a built-in runtime, it's up to the client to act on events at a time in
+//! the future, as well as generate and broadcast funding transactions, handle payment preimages,
+//! and a few other things.
+//!
+//! Note that many events are handled for you by PeerHandler, so in the common design of having a
+//! PeerManager which marshals messages to ChannelManager and Router, you only need to call
+//! process_events on the PeerHandler and then get_and_clear_pending_events and handle the events
+//! that bubble up to the surface. If, however, you do not have a PeerHandler managing a
+//! ChannelManager you need to handle all of the events which may be generated.
+//TODO: We need better separation of event types ^
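+//!
+//! As a rough sketch only (the construction of `peer_manager` and `channel_manager` is elided,
+//! and `handle_event` is a hypothetical client-side function), a main loop might look like:
+//!
+//! ```ignore
+//! loop {
+//!     // Let the PeerManager marshal any queued messages out to peers first...
+//!     peer_manager.process_events();
+//!     // ...then handle whatever bubbled up to the surface.
+//!     for event in channel_manager.get_and_clear_pending_events() {
+//!         handle_event(event); // e.g. broadcast funding txn, claim payments, etc.
+//!     }
+//! }
+//! ```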
+
+use ln::msgs;
+use ln::channelmanager::{PaymentPreimage, PaymentHash};
+use chain::transaction::OutPoint;
+use chain::keysinterface::SpendableOutputDescriptor;
+
+use bitcoin::blockdata::script::Script;
+
+use secp256k1::key::PublicKey;
+
+use std::time::Duration;
+
+/// An Event which you should probably take some action in response to.
+pub enum Event {
+ /// Used to indicate that the client should generate a funding transaction with the given
+ /// parameters and then call ChannelManager::funding_transaction_generated.
+ /// Generated in ChannelManager message handling.
+ /// Note that *all inputs* in the funding transaction must spend SegWit outputs or your
+ /// counterparty can steal your funds!
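+ ///
+ /// A minimal sketch of a response to this event (`build_funding_tx` is a hypothetical
+ /// client-side helper, and we assume the funding output ends up at output index 0):
+ ///
+ /// ```ignore
+ /// Event::FundingGenerationReady { temporary_channel_id, channel_value_satoshis, output_script, .. } => {
+ ///     // All inputs MUST spend SegWit outputs, or the counterparty can steal the funds.
+ ///     let funding_tx = build_funding_tx(channel_value_satoshis, &output_script);
+ ///     let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
+ ///     channel_manager.funding_transaction_generated(&temporary_channel_id, funding_txo);
+ /// }
+ /// ```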
+ FundingGenerationReady {
+ /// The random channel_id we picked which you'll need to pass into
+ /// ChannelManager::funding_transaction_generated.
+ temporary_channel_id: [u8; 32],
+ /// The value, in satoshis, that the output should have.
+ channel_value_satoshis: u64,
+ /// The script which should be used in the transaction output.
+ output_script: Script,
+ /// The value passed in to ChannelManager::create_channel
+ user_channel_id: u64,
+ },
+ /// Used to indicate that the client may now broadcast the funding transaction it created for a
+ /// channel. Broadcasting such a transaction prior to this event may lead to our counterparty
+ /// trivially stealing all funds in the funding transaction!
+ FundingBroadcastSafe {
+ /// The output, which was passed to ChannelManager::funding_transaction_generated, which is
+ /// now safe to broadcast.
+ funding_txo: OutPoint,
+ /// The value passed in to ChannelManager::create_channel
+ user_channel_id: u64,
+ },
+ /// Indicates we've received money! Just gotta dig out that payment preimage and feed it to
+ /// ChannelManager::claim_funds to get it...
+ /// Note that if the preimage is not known or the amount paid is incorrect, you must call
+ /// ChannelManager::fail_htlc_backwards to free up resources for this HTLC.
+ /// The amount paid should be considered 'incorrect' when it is less than the expected amount
+ /// or more than twice the expected amount.
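+ ///
+ /// A minimal sketch of handling this event (`expected_amt_for` and `preimage_for` are
+ /// hypothetical client-side lookups keyed by payment hash):
+ ///
+ /// ```ignore
+ /// Event::PaymentReceived { payment_hash, amt } => {
+ ///     let expected = expected_amt_for(&payment_hash);
+ ///     match preimage_for(&payment_hash) {
+ ///         // Only claim when we got at least, and at most twice, what we asked for.
+ ///         Some(preimage) if amt >= expected && amt <= 2 * expected => {
+ ///             channel_manager.claim_funds(preimage);
+ ///         },
+ ///         _ => {
+ ///             channel_manager.fail_htlc_backwards(&payment_hash);
+ ///         },
+ ///     }
+ /// }
+ /// ```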
+ PaymentReceived {
+ /// The hash for which the preimage should be handed to the ChannelManager.
+ payment_hash: PaymentHash,
+ /// The value, in thousandths of a satoshi, that this payment is for. Note that you must
+ /// compare this to the expected value before accepting the payment (as otherwise you are
+ /// providing proof-of-payment for less than the value you expected!).
+ amt: u64,
+ },
+ /// Indicates an outbound payment we made succeeded (ie it made it all the way to its target
+ /// and we got back the payment preimage for it).
+ /// Note that duplicative PaymentSent Events may be generated - it is your responsibility to
+ /// deduplicate them by payment_preimage (which MUST be unique)!
+ PaymentSent {
+ /// The preimage to the hash given to ChannelManager::send_payment.
+ /// Note that this serves as a payment receipt, if you wish to have such a thing, you must
+ /// store it somehow!
+ payment_preimage: PaymentPreimage,
+ },
+ /// Indicates an outbound payment we made failed. Probably some intermediary node dropped
+ /// something. You may wish to retry with a different route.
+ /// Note that duplicative PaymentFailed Events may be generated - it is your responsibility to
+ /// deduplicate them by payment_hash (which MUST be unique)!
+ PaymentFailed {
+ /// The hash which was given to ChannelManager::send_payment.
+ payment_hash: PaymentHash,
+ /// Indicates the payment was rejected for some reason by the recipient. This implies that
+ /// the payment has failed, not just the route in question. If this is not set, you may
+ /// retry the payment via a different route.
+ rejected_by_dest: bool,
+ #[cfg(test)]
+ error_code: Option<u16>,
+ },
+ /// Used to indicate that ChannelManager::process_pending_htlc_forwards should be called at a
+ /// time in the future.
+ PendingHTLCsForwardable {
+ /// The minimum amount of time that should be waited prior to calling
+ /// process_pending_htlc_forwards. To increase the effort required to correlate payments,
+ /// you should wait a random amount of time in roughly the range (now + time_forwardable,
+ /// now + 5*time_forwardable).
+ time_forwardable: Duration,
+ },
+ /// Used to indicate that an output was generated on-chain which you should know how to spend.
+ /// Such outputs will *not* ever be spent by rust-lightning, so you need to store them
+ /// somewhere and spend them when you create on-chain transactions.
+ SpendableOutputs {
+ /// The outputs which you should store as spendable by you.
+ outputs: Vec<SpendableOutputDescriptor>,
+ },
+}
+
+/// An event generated by ChannelManager which indicates a message should be sent to a peer (or
+/// broadcast to most peers).
+/// These events are handled by PeerManager::process_events if you are using a PeerManager.
+#[derive(Clone)]
+pub enum MessageSendEvent {
+ /// Used to indicate that we've accepted a channel open and should send the accept_channel
+ /// message provided to the given peer.
+ SendAcceptChannel {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::AcceptChannel,
+ },
+ /// Used to indicate that we've initiated a channel open and should send the open_channel
+ /// message provided to the given peer.
+ SendOpenChannel {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::OpenChannel,
+ },
+ /// Used to indicate that a funding_created message should be sent to the peer with the given node_id.
+ SendFundingCreated {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::FundingCreated,
+ },
+ /// Used to indicate that a funding_signed message should be sent to the peer with the given node_id.
+ SendFundingSigned {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::FundingSigned,
+ },
+ /// Used to indicate that a funding_locked message should be sent to the peer with the given node_id.
+ SendFundingLocked {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The funding_locked message which should be sent.
+ msg: msgs::FundingLocked,
+ },
+ /// Used to indicate that an announcement_signatures message should be sent to the peer with the given node_id.
+ SendAnnouncementSignatures {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The announcement_signatures message which should be sent.
+ msg: msgs::AnnouncementSignatures,
+ },
+ /// Used to indicate that a series of HTLC update messages, as well as a commitment_signed
+ /// message should be sent to the peer with the given node_id.
+ UpdateHTLCs {
+ /// The node_id of the node which should receive these message(s)
+ node_id: PublicKey,
+ /// The update messages which should be sent. ALL messages in the struct should be sent!
+ updates: msgs::CommitmentUpdate,
+ },
+ /// Used to indicate that a revoke_and_ack message should be sent to the peer with the given node_id.
+ SendRevokeAndACK {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::RevokeAndACK,
+ },
+ /// Used to indicate that a closing_signed message should be sent to the peer with the given node_id.
+ SendClosingSigned {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::ClosingSigned,
+ },
+ /// Used to indicate that a shutdown message should be sent to the peer with the given node_id.
+ SendShutdown {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::Shutdown,
+ },
+ /// Used to indicate that a channel_reestablish message should be sent to the peer with the given node_id.
+ SendChannelReestablish {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The message which should be sent.
+ msg: msgs::ChannelReestablish,
+ },
+ /// Used to indicate that a channel_announcement and channel_update should be broadcast to all
+ /// peers (except any peer whose node_id matches msg.contents.node_id_1 or msg.contents.node_id_2).
+ BroadcastChannelAnnouncement {
+ /// The channel_announcement which should be sent.
+ msg: msgs::ChannelAnnouncement,
+ /// The followup channel_update which should be sent.
+ update_msg: msgs::ChannelUpdate,
+ },
+ /// Used to indicate that a channel_update should be broadcast to all peers.
+ BroadcastChannelUpdate {
+ /// The channel_update which should be sent.
+ msg: msgs::ChannelUpdate,
+ },
+ /// Used to indicate that an error action should be taken for the peer with the given node_id.
+ HandleError {
+ /// The node_id of the node which should receive this message
+ node_id: PublicKey,
+ /// The action which should be taken.
++ action: msgs::ErrorAction
+ },
+ /// When a payment fails we may receive updates back from the hop where it failed. In such
+ /// cases this event is generated so that we can inform the router of this information.
+ PaymentFailureNetworkUpdate {
+ /// The channel/node update which should be sent to router
+ update: msgs::HTLCFailChannelUpdate,
+ }
+}
+
+/// A trait indicating an object may generate message send events
+pub trait MessageSendEventsProvider {
+ /// Gets the list of pending events which were generated by previous actions, clearing the list
+ /// in the process.
+ fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent>;
+}
+
+/// A trait indicating an object may generate events
+pub trait EventsProvider {
+ /// Gets the list of pending events which were generated by previous actions, clearing the list
+ /// in the process.
+ fn get_and_clear_pending_events(&self) -> Vec<Event>;
+}
--- /dev/null
- use ln::msgs::{HandleError};
+use chain::chaininterface;
+use chain::chaininterface::ConfirmationTarget;
+use chain::transaction::OutPoint;
+use chain::keysinterface;
+use ln::channelmonitor;
+use ln::msgs;
+use ln::msgs::LocalFeatures;
- fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::OpenChannel) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++use ln::msgs::{LightningError};
+use ln::channelmonitor::HTLCUpdate;
+use util::events;
+use util::logger::{Logger, Level, Record};
+use util::ser::{ReadableArgs, Writer};
+
+use bitcoin::blockdata::transaction::Transaction;
+use bitcoin::blockdata::script::Script;
+use bitcoin_hashes::sha256d::Hash as Sha256dHash;
+use bitcoin::network::constants::Network;
+
+use secp256k1::{SecretKey, PublicKey};
+
+use std::time::{SystemTime, UNIX_EPOCH};
+use std::sync::{Arc,Mutex};
+use std::mem;
+
+pub struct TestVecWriter(pub Vec<u8>);
+impl Writer for TestVecWriter {
+ fn write_all(&mut self, buf: &[u8]) -> Result<(), ::std::io::Error> {
+ self.0.extend_from_slice(buf);
+ Ok(())
+ }
+ fn size_hint(&mut self, size: usize) {
+ self.0.reserve_exact(size);
+ }
+}
+
+pub struct TestFeeEstimator {
+ pub sat_per_kw: u64,
+}
+impl chaininterface::FeeEstimator for TestFeeEstimator {
+ fn get_est_sat_per_1000_weight(&self, _confirmation_target: ConfirmationTarget) -> u64 {
+ self.sat_per_kw
+ }
+}
+
+pub struct TestChannelMonitor {
+ pub added_monitors: Mutex<Vec<(OutPoint, channelmonitor::ChannelMonitor)>>,
+ pub simple_monitor: Arc<channelmonitor::SimpleManyChannelMonitor<OutPoint>>,
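+ // The Result that the next add_update_monitor call will return, letting tests flip
+ // monitor updating between success and the various ChannelMonitorUpdateErr failures.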
+ pub update_ret: Mutex<Result<(), channelmonitor::ChannelMonitorUpdateErr>>,
+}
+impl TestChannelMonitor {
+ pub fn new(chain_monitor: Arc<chaininterface::ChainWatchInterface>, broadcaster: Arc<chaininterface::BroadcasterInterface>, logger: Arc<Logger>, fee_estimator: Arc<chaininterface::FeeEstimator>) -> Self {
+ Self {
+ added_monitors: Mutex::new(Vec::new()),
+ simple_monitor: channelmonitor::SimpleManyChannelMonitor::new(chain_monitor, broadcaster, logger, fee_estimator),
+ update_ret: Mutex::new(Ok(())),
+ }
+ }
+}
+impl channelmonitor::ManyChannelMonitor for TestChannelMonitor {
+ fn add_update_monitor(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor) -> Result<(), channelmonitor::ChannelMonitorUpdateErr> {
+ // At every point where we get a monitor update, we should be able to send a useful monitor
+ // both to a watchtower and to disk...
+ let mut w = TestVecWriter(Vec::new());
+ monitor.write_for_disk(&mut w).unwrap();
+ assert!(<(Sha256dHash, channelmonitor::ChannelMonitor)>::read(
+ &mut ::std::io::Cursor::new(&w.0), Arc::new(TestLogger::new())).unwrap().1 == monitor);
+ w.0.clear();
+ monitor.write_for_watchtower(&mut w).unwrap(); // This at least shouldn't crash...
+ self.added_monitors.lock().unwrap().push((funding_txo, monitor.clone()));
+ assert!(self.simple_monitor.add_update_monitor(funding_txo, monitor).is_ok());
+ self.update_ret.lock().unwrap().clone()
+ }
+
+ fn fetch_pending_htlc_updated(&self) -> Vec<HTLCUpdate> {
+ self.simple_monitor.fetch_pending_htlc_updated()
+ }
+}
+
+pub struct TestBroadcaster {
+ pub txn_broadcasted: Mutex<Vec<Transaction>>,
+}
+impl chaininterface::BroadcasterInterface for TestBroadcaster {
+ fn broadcast_transaction(&self, tx: &Transaction) {
+ self.txn_broadcasted.lock().unwrap().push(tx.clone());
+ }
+}
+
+pub struct TestChannelMessageHandler {
+ pub pending_events: Mutex<Vec<events::MessageSendEvent>>,
+}
+
+impl TestChannelMessageHandler {
+ pub fn new() -> Self {
+ TestChannelMessageHandler {
+ pending_events: Mutex::new(Vec::new()),
+ }
+ }
+}
+
+impl msgs::ChannelMessageHandler for TestChannelMessageHandler {
- fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::AcceptChannel) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_open_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::OpenChannel) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingCreated) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_accept_channel(&self, _their_node_id: &PublicKey, _their_local_features: LocalFeatures, _msg: &msgs::AcceptChannel) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingSigned) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_funding_created(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingCreated) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingLocked) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_funding_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingSigned) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &msgs::Shutdown) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_funding_locked(&self, _their_node_id: &PublicKey, _msg: &msgs::FundingLocked) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::ClosingSigned) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_shutdown(&self, _their_node_id: &PublicKey, _msg: &msgs::Shutdown) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateAddHTLC) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_closing_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::ClosingSigned) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_update_add_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateAddHTLC) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_update_fulfill_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFulfillHTLC) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_update_fail_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailHTLC) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_update_fail_malformed_htlc(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_commitment_signed(&self, _their_node_id: &PublicKey, _msg: &msgs::CommitmentSigned) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_revoke_and_ack(&self, _their_node_id: &PublicKey, _msg: &msgs::RevokeAndACK) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_update_fee(&self, _their_node_id: &PublicKey, _msg: &msgs::UpdateFee) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) -> Result<(), HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_announcement_signatures(&self, _their_node_id: &PublicKey, _msg: &msgs::AnnouncementSignatures) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result<bool, HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_channel_reestablish(&self, _their_node_id: &PublicKey, _msg: &msgs::ChannelReestablish) -> Result<(), LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
+ fn peer_disconnected(&self, _their_node_id: &PublicKey, _no_connection_possible: bool) {}
+ fn peer_connected(&self, _their_node_id: &PublicKey) {}
+ fn handle_error(&self, _their_node_id: &PublicKey, _msg: &msgs::ErrorMessage) {}
+}
+
+impl events::MessageSendEventsProvider for TestChannelMessageHandler {
+ fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ let mut ret = Vec::new();
+ mem::swap(&mut ret, &mut *pending_events);
+ ret
+ }
+}
+
+pub struct TestRoutingMessageHandler {}
+
+impl TestRoutingMessageHandler {
+ pub fn new() -> Self {
+ TestRoutingMessageHandler {}
+ }
+}
+impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
- fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result<bool, HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_node_announcement(&self, _msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
- fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result<bool, HandleError> {
- Err(HandleError { err: "", action: None })
++ fn handle_channel_announcement(&self, _msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
++ fn handle_channel_update(&self, _msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> {
++ Err(LightningError { err: "", action: msgs::ErrorAction::IgnoreError })
+ }
+ fn handle_htlc_fail_channel_update(&self, _update: &msgs::HTLCFailChannelUpdate) {}
+ fn get_next_channel_announcements(&self, _starting_point: u64, _batch_amount: u8) -> Vec<(msgs::ChannelAnnouncement, msgs::ChannelUpdate, msgs::ChannelUpdate)> {
+ Vec::new()
+ }
+ fn get_next_node_announcements(&self, _starting_point: Option<&PublicKey>, _batch_amount: u8) -> Vec<msgs::NodeAnnouncement> {
+ Vec::new()
+ }
+}
+
+pub struct TestLogger {
+ level: Level,
+ id: String,
+}
+
+impl TestLogger {
+ pub fn new() -> TestLogger {
+ Self::with_id("".to_owned())
+ }
+ pub fn with_id(id: String) -> TestLogger {
+ TestLogger {
+ level: Level::Trace,
+ id,
+ }
+ }
+ pub fn enable(&mut self, level: Level) {
+ self.level = level;
+ }
+}
+
+impl Logger for TestLogger {
+ fn log(&self, record: &Record) {
+ if self.level >= record.level {
+ println!("{:<5} {} [{} : {}, {}] {}", record.level.to_string(), self.id, record.module_path, record.file, record.line, record.args);
+ }
+ }
+}
+
+pub struct TestKeysInterface {
+ backing: keysinterface::KeysManager,
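+ // When set, these are returned in place of the backing KeysManager's (otherwise random)
+ // values, letting tests pin session keys and channel ids deterministically.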
+ pub override_session_priv: Mutex<Option<SecretKey>>,
+ pub override_channel_id_priv: Mutex<Option<[u8; 32]>>,
+}
+
+impl keysinterface::KeysInterface for TestKeysInterface {
+ fn get_node_secret(&self) -> SecretKey { self.backing.get_node_secret() }
+ fn get_destination_script(&self) -> Script { self.backing.get_destination_script() }
+ fn get_shutdown_pubkey(&self) -> PublicKey { self.backing.get_shutdown_pubkey() }
+ fn get_channel_keys(&self, inbound: bool) -> keysinterface::ChannelKeys { self.backing.get_channel_keys(inbound) }
+
+ fn get_session_key(&self) -> SecretKey {
+ match *self.override_session_priv.lock().unwrap() {
+ Some(key) => key.clone(),
+ None => self.backing.get_session_key()
+ }
+ }
+
+ fn get_channel_id(&self) -> [u8; 32] {
+ match *self.override_channel_id_priv.lock().unwrap() {
+ Some(key) => key.clone(),
+ None => self.backing.get_channel_id()
+ }
+ }
+}
+
+impl TestKeysInterface {
+ pub fn new(seed: &[u8; 32], network: Network, logger: Arc<Logger>) -> Self {
+ let now = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards");
+ Self {
+ backing: keysinterface::KeysManager::new(seed, network, logger, now.as_secs(), now.subsec_nanos()),
+ override_session_priv: Mutex::new(None),
+ override_channel_id_priv: Mutex::new(None),
+ }
+ }
+}