diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index e4d7a33c..8a570d26 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -15,7 +15,7 @@ use chain::Watch;
 use chain::channelmonitor;
 use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use chain::transaction::OutPoint;
-use chain::keysinterface::{ChannelKeys, KeysInterface, SpendableOutputDescriptor};
+use chain::keysinterface::{Sign, KeysInterface};
 use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
 use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure, BREAKDOWN_TIMEOUT};
 use ln::channel::{Channel, ChannelError};
@@ -24,7 +24,7 @@ use routing::router::{Route, RouteHop, get_route};
 use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, ErrorAction};
-use util::enforcing_trait_impls::EnforcingChannelKeys;
+use util::enforcing_trait_impls::EnforcingSigner;
 use util::{byte_utils, test_utils};
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
 use util::errors::APIError;
@@ -33,12 +33,8 @@ use util::config::UserConfig;
 
 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
 use bitcoin::hash_types::{Txid, BlockHash};
-use bitcoin::util::bip143;
-use bitcoin::util::address::Address;
-use bitcoin::util::bip32::{ChildNumber, ExtendedPubKey, ExtendedPrivKey};
 use bitcoin::blockdata::block::{Block, BlockHeader};
-use bitcoin::blockdata::transaction::{Transaction, TxOut, TxIn, SigHashType};
-use bitcoin::blockdata::script::{Builder, Script};
+use bitcoin::blockdata::script::Builder;
 use bitcoin::blockdata::opcodes;
 use bitcoin::blockdata::constants::genesis_block;
 use bitcoin::network::constants::Network;
@@ -59,6 +55,7 @@ use std::mem;
 
 use ln::functional_test_utils::*;
 use ln::chan_utils::CommitmentTransaction;
+use ln::msgs::OptionalField::Present;
 
 #[test]
 fn test_insane_channel_opens() {
@@ -71,7 +68,7 @@ fn test_insane_channel_opens() {
 	// Instantiate channel parameters where we push the maximum msats given our
 	// funding satoshis
 	let channel_value_sat = 31337; // same as funding satoshis
-	let channel_reserve_satoshis = Channel::<EnforcingChannelKeys>::get_holder_selected_channel_reserve_satoshis(channel_value_sat);
+	let channel_reserve_satoshis = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_sat);
 	let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
 
 	// Have node0 initiate a channel to node1 with aforementioned parameters
@@ -836,9 +833,9 @@ fn pre_funding_lock_shutdown_test() {
 
 	nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();
 	let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
 	let node_1_shutdown = get_event_msg!(nodes[1],
MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed); @@ -866,9 +863,9 @@ fn updates_shutdown_wait() { nodes[0].node.close_channel(&chan_1.2).unwrap(); let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -953,13 +950,13 @@ fn htlc_fail_async_shutdown() { nodes[1].node.close_channel(&chan_1.2).unwrap(); let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false); let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1021,10 +1018,10 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[1].node.close_channel(&chan_1.2).unwrap(); let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); if recv_count > 0 { - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); if recv_count > 1 { - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); } } @@ -1043,14 +1040,14 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish); let node_0_2nd_shutdown = if recv_count > 0 { let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); 
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown); node_0_2nd_shutdown } else { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown); get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()) }; - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_2nd_shutdown); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_2nd_shutdown); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1110,10 +1107,10 @@ fn do_test_shutdown_rebroadcast(recv_count: u8) { let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); assert!(node_1_3rd_shutdown == node_1_2nd_shutdown); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_3rd_shutdown); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_3rd_shutdown); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_3rd_shutdown); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_3rd_shutdown); let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); assert!(node_0_closing_signed == node_0_2nd_closing_signed); @@ -1583,7 +1580,7 @@ fn test_fee_spike_violation_fails_htlc() { let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV, &logger).unwrap(); (route, payment_hash, payment_preimage) }} - }; + } let (route, payment_hash, _) = get_route_and_payment_hash!(3460001); // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() @@ -1614,24 +1611,24 @@ fn test_fee_spike_violation_fails_htlc() { const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; - // Get the EnforcingChannelKeys for each channel, which will be used to (1) get the keys + // Get the EnforcingSigner for each channel, which will be used to (1) get the keys // needed to sign the new commitment tx and (2) sign the new commitment tx. 
let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point) = { let chan_lock = nodes[0].node.channel_state.lock().unwrap(); let local_chan = chan_lock.by_id.get(&chan.2).unwrap(); - let chan_keys = local_chan.get_keys(); - let pubkeys = chan_keys.pubkeys(); + let chan_signer = local_chan.get_signer(); + let pubkeys = chan_signer.pubkeys(); (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, - chan_keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER), - chan_keys.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx)) + chan_signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER), + chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx)) }; let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point) = { let chan_lock = nodes[1].node.channel_state.lock().unwrap(); let remote_chan = chan_lock.by_id.get(&chan.2).unwrap(); - let chan_keys = remote_chan.get_keys(); - let pubkeys = chan_keys.pubkeys(); + let chan_signer = remote_chan.get_signer(); + let pubkeys = chan_signer.pubkeys(); (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, - chan_keys.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx)) + chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx)) }; // Assemble the set of keys we can use for signatures for our commitment_signed message. @@ -1655,7 +1652,7 @@ fn test_fee_spike_violation_fails_htlc() { let res = { let local_chan_lock = nodes[0].node.channel_state.lock().unwrap(); let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap(); - let local_chan_keys = local_chan.get_keys(); + let local_chan_signer = local_chan.get_signer(); let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data( commitment_number, 95000, @@ -1665,7 +1662,7 @@ fn test_fee_spike_violation_fails_htlc() { &mut vec![(accepted_htlc_info, ())], &local_chan.channel_transaction_parameters.as_counterparty_broadcastable() ); - local_chan_keys.sign_counterparty_commitment(&commitment_tx, &secp_ctx).unwrap() + local_chan_signer.sign_counterparty_commitment(&commitment_tx, &secp_ctx).unwrap() }; let commit_signed_msg = msgs::CommitmentSigned { @@ -1705,8 +1702,9 @@ fn test_fee_spike_violation_fails_htlc() { fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { let mut chanmon_cfgs = create_chanmon_cfgs(2); // Set the fee rate for the channel very high, to the point where the fundee - // sending any amount would result in a channel reserve violation. In this test - // we check that we would be prevented from sending an HTLC in this situation. + // sending any above-dust amount would result in a channel reserve violation. + // In this test we check that we would be prevented from sending an HTLC in + // this situation. 
chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 6000 }; chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 6000 }; let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); @@ -1722,9 +1720,9 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes.first().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV, &logger).unwrap(); (route, payment_hash, payment_preimage) }} - }; + } - let (route, our_payment_hash, _) = get_route_and_payment_hash!(1000); + let (route, our_payment_hash, _) = get_route_and_payment_hash!(4843000); unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value")); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1754,7 +1752,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes.first().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV, &logger).unwrap(); (route, payment_hash, payment_preimage) }} - }; + } let (route, payment_hash, _) = get_route_and_payment_hash!(1000); // Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc() @@ -1782,6 +1780,57 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { check_added_monitors!(nodes[0], 1); } +#[test] +fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() { + // Test that if we receive many dust HTLCs over an outbound channel, they don't count when + // calculating our commitment transaction fee (this was previously broken). + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + // Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a + // channel reserve violation (so their balance is channel reserve (1000 sats) + commitment + // transaction fee with 0 HTLCs (183 sats)). + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98817000, InitFeatures::known(), InitFeatures::known()); + + let dust_amt = 546000; // Dust amount + // In the previous code, routing this dust payment would cause nodes[0] to perceive a channel + // reserve violation even though it's a dust HTLC and therefore shouldn't count towards the + // commitment transaction fee. + let (_, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt); +} + +#[test] +fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() { + // Test that if we receive many dust HTLCs over an inbound channel, they don't count when + // calculating our counterparty's commitment transaction fee (this was previously broken). 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000, InitFeatures::known(), InitFeatures::known()); + + let payment_amt = 46000; // Dust amount + // In the previous code, these first four payments would succeed. + let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt); + let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt); + let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt); + let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt); + + // Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer. + let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt); + let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt); + let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt); + let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt); + let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt); + + // And this last payment previously resulted in nodes[1] closing on its inbound-channel + // counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment + // transaction fee and therefore perceived this next payment as a channel reserve violation. + let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt); +} + #[test] fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let chanmon_cfgs = create_chanmon_cfgs(3); @@ -1799,7 +1848,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV, &logger).unwrap(); (route, payment_hash, payment_preimage) }} - }; + } let feemsat = 239; let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat; @@ -1900,7 +1949,7 @@ fn test_channel_reserve_holding_cell_htlcs() { let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV, &logger).unwrap(); (route, payment_hash, payment_preimage) }} - }; + } macro_rules! 
expect_forward { ($node: expr) => {{ @@ -2093,23 +2142,6 @@ fn test_channel_reserve_holding_cell_htlcs() { let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1); let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat; - { - let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_3 + 1); - let err = nodes[0].node.send_payment(&route, our_payment_hash, &None).err().unwrap(); - match err { - PaymentSendFailure::AllFailedRetrySafe(ref fails) => { - match &fails[0] { - &APIError::ChannelUnavailable{ref err} => - assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)), - _ => panic!("Unexpected error variant"), - } - }, - _ => panic!("Unexpected error variant"), - } - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put our balance under counterparty-announced channel reserve value".to_string(), 3); - } - send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3, recv_value_3); let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1); @@ -3400,7 +3432,7 @@ fn test_htlc_ignore_latest_remote_commitment() { create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); route_payment(&nodes[0], &[&nodes[1]], 10000000); - nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id); + nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap(); check_closed_broadcast!(nodes[0], false); check_added_monitors!(nodes[0], 1); @@ -3461,7 +3493,7 @@ fn test_force_close_fail_back() { // state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!). - nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id); + nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap(); check_closed_broadcast!(nodes[2], false); check_added_monitors!(nodes[2], 1); let tx = { @@ -3485,8 +3517,8 @@ fn test_force_close_fail_back() { // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success.. 
{ - let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.lock().unwrap(); - monitors.get_mut(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap() + let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.read().unwrap(); + monitors.get(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap() .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &node_cfgs[2].fee_estimator, &&logger); } connect_block(&nodes[2], &block, 1); @@ -4205,8 +4237,8 @@ fn test_invalid_channel_announcement() { nodes[0].net_graph_msg_handler.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } ); - let as_bitcoin_key = as_chan.get_keys().inner.holder_channel_pubkeys.funding_pubkey; - let bs_bitcoin_key = bs_chan.get_keys().inner.holder_channel_pubkeys.funding_pubkey; + let as_bitcoin_key = as_chan.get_signer().inner.holder_channel_pubkeys.funding_pubkey; + let bs_bitcoin_key = bs_chan.get_signer().inner.holder_channel_pubkeys.funding_pubkey; let as_network_key = nodes[0].node.get_our_node_id(); let bs_network_key = nodes[1].node.get_our_node_id(); @@ -4233,8 +4265,8 @@ fn test_invalid_channel_announcement() { macro_rules! sign_msg { ($unsigned_msg: expr) => { let msghash = Message::from_slice(&Sha256dHash::hash(&$unsigned_msg.encode()[..])[..]).unwrap(); - let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_keys().inner.funding_key); - let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_keys().inner.funding_key); + let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_signer().inner.funding_key); + let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_signer().inner.funding_key); let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].keys_manager.get_node_secret()); let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].keys_manager.get_node_secret()); chan_announcement = msgs::ChannelAnnouncement { @@ -4273,7 +4305,7 @@ fn test_no_txn_manager_serialize_deserialize() { let fee_estimator: test_utils::TestFeeEstimator; let persister: test_utils::TestPersister; let new_chain_monitor: test_utils::TestChainMonitor; - let nodes_0_deserialized: ChannelManager; + let nodes_0_deserialized: ChannelManager; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known()); @@ -4282,7 +4314,7 @@ fn test_no_txn_manager_serialize_deserialize() { let nodes_0_serialized = nodes[0].node.encode(); let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); - nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); + nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); logger = test_utils::TestLogger::new(); fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 }; @@ -4291,7 +4323,7 @@ fn test_no_txn_manager_serialize_deserialize() { new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager); nodes[0].chain_monitor = &new_chain_monitor; let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..]; - let (_, mut chan_0_monitor) 
= <(BlockHash, ChannelMonitor)>::read( + let (_, mut chan_0_monitor) = <(Option, ChannelMonitor)>::read( &mut chan_0_monitor_read, keys_manager).unwrap(); assert!(chan_0_monitor_read.is_empty()); @@ -4300,7 +4332,7 @@ fn test_no_txn_manager_serialize_deserialize() { let (_, nodes_0_deserialized_tmp) = { let mut channel_monitors = HashMap::new(); channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor); - <(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + <(Option, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { default_config: config, keys_manager, fee_estimator: &fee_estimator, @@ -4349,7 +4381,7 @@ fn test_manager_serialize_deserialize_events() { let persister: test_utils::TestPersister; let logger: test_utils::TestLogger; let new_chain_monitor: test_utils::TestChainMonitor; - let nodes_0_deserialized: ChannelManager; + let nodes_0_deserialized: ChannelManager; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); // Start creating a channel, but stop right before broadcasting the event message FundingBroadcastSafe @@ -4391,7 +4423,7 @@ fn test_manager_serialize_deserialize_events() { // Start the de/seriailization process mid-channel creation to check that the channel manager will hold onto events that are serialized let nodes_0_serialized = nodes[0].node.encode(); let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); - nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); + nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 }; logger = test_utils::TestLogger::new(); @@ -4400,7 +4432,7 @@ fn test_manager_serialize_deserialize_events() { new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager); nodes[0].chain_monitor = &new_chain_monitor; let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..]; - let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor)>::read( + let (_, mut chan_0_monitor) = <(Option, ChannelMonitor)>::read( &mut chan_0_monitor_read, keys_manager).unwrap(); assert!(chan_0_monitor_read.is_empty()); @@ -4409,7 +4441,7 @@ fn test_manager_serialize_deserialize_events() { let (_, nodes_0_deserialized_tmp) = { let mut channel_monitors = HashMap::new(); channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor); - <(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + <(Option, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { default_config: config, keys_manager, fee_estimator: &fee_estimator, @@ -4472,7 +4504,7 @@ fn test_simple_manager_serialize_deserialize() { let fee_estimator: test_utils::TestFeeEstimator; let persister: test_utils::TestPersister; let new_chain_monitor: test_utils::TestChainMonitor; - let nodes_0_deserialized: ChannelManager; + let nodes_0_deserialized: ChannelManager; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); @@ -4483,7 +4515,7 @@ fn test_simple_manager_serialize_deserialize() { let nodes_0_serialized = nodes[0].node.encode(); let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new()); - 
nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); + nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap(); logger = test_utils::TestLogger::new(); fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 }; @@ -4492,7 +4524,7 @@ fn test_simple_manager_serialize_deserialize() { new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager); nodes[0].chain_monitor = &new_chain_monitor; let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..]; - let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor)>::read( + let (_, mut chan_0_monitor) = <(Option, ChannelMonitor)>::read( &mut chan_0_monitor_read, keys_manager).unwrap(); assert!(chan_0_monitor_read.is_empty()); @@ -4500,7 +4532,7 @@ fn test_simple_manager_serialize_deserialize() { let (_, nodes_0_deserialized_tmp) = { let mut channel_monitors = HashMap::new(); channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor); - <(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + <(Option, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { default_config: UserConfig::default(), keys_manager, fee_estimator: &fee_estimator, @@ -4533,14 +4565,14 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let fee_estimator: test_utils::TestFeeEstimator; let persister: test_utils::TestPersister; let new_chain_monitor: test_utils::TestChainMonitor; - let nodes_0_deserialized: ChannelManager; + let nodes_0_deserialized: ChannelManager; let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); create_announced_chan_between_nodes(&nodes, 2, 0, InitFeatures::known(), InitFeatures::known()); let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, InitFeatures::known(), InitFeatures::known()); let mut node_0_stale_monitors_serialized = Vec::new(); - for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() { + for monitor in nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter() { let mut writer = test_utils::TestVecWriter(Vec::new()); monitor.1.write(&mut writer).unwrap(); node_0_stale_monitors_serialized.push(writer.0); @@ -4559,7 +4591,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { // Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/ // nodes[3]) let mut node_0_monitors_serialized = Vec::new(); - for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() { + for monitor in nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter() { let mut writer = test_utils::TestVecWriter(Vec::new()); monitor.1.write(&mut writer).unwrap(); node_0_monitors_serialized.push(writer.0); @@ -4576,7 +4608,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let mut node_0_stale_monitors = Vec::new(); for serialized in node_0_stale_monitors_serialized.iter() { let mut read = &serialized[..]; - let (_, monitor) = <(BlockHash, ChannelMonitor)>::read(&mut read, keys_manager).unwrap(); + let (_, monitor) = <(Option, ChannelMonitor)>::read(&mut read, keys_manager).unwrap(); assert!(read.is_empty()); node_0_stale_monitors.push(monitor); } @@ -4584,14 +4616,14 @@ 
fn test_manager_serialize_deserialize_inconsistent_monitor() { let mut node_0_monitors = Vec::new(); for serialized in node_0_monitors_serialized.iter() { let mut read = &serialized[..]; - let (_, monitor) = <(BlockHash, ChannelMonitor)>::read(&mut read, keys_manager).unwrap(); + let (_, monitor) = <(Option, ChannelMonitor)>::read(&mut read, keys_manager).unwrap(); assert!(read.is_empty()); node_0_monitors.push(monitor); } let mut nodes_0_read = &nodes_0_serialized[..]; if let Err(msgs::DecodeError::InvalidValue) = - <(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + <(Option, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { default_config: UserConfig::default(), keys_manager, fee_estimator: &fee_estimator, @@ -4605,7 +4637,7 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { let mut nodes_0_read = &nodes_0_serialized[..]; let (_, nodes_0_deserialized_tmp) = - <(BlockHash, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { + <(Option, ChannelManager)>::read(&mut nodes_0_read, ChannelManagerReadArgs { default_config: UserConfig::default(), keys_manager, fee_estimator: &fee_estimator, @@ -4655,120 +4687,26 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() { macro_rules! check_spendable_outputs { ($node: expr, $der_idx: expr, $keysinterface: expr, $chan_value: expr) => { { - let events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events(); + let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events(); let mut txn = Vec::new(); - for event in events { + let mut all_outputs = Vec::new(); + let secp_ctx = Secp256k1::new(); + for event in events.drain(..) { match event { - Event::SpendableOutputs { ref outputs } => { - for outp in outputs { - match *outp { - SpendableOutputDescriptor::StaticOutputCounterpartyPayment { ref outpoint, ref output, ref key_derivation_params } => { - let input = TxIn { - previous_output: outpoint.into_bitcoin_outpoint(), - script_sig: Script::new(), - sequence: 0, - witness: Vec::new(), - }; - let outp = TxOut { - script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), - value: output.value, - }; - let mut spend_tx = Transaction { - version: 2, - lock_time: 0, - input: vec![input], - output: vec![outp], - }; - spend_tx.output[0].value -= (spend_tx.get_weight() + 2 + 1 + 73 + 35 + 3) as u64 / 4; // (Max weight + 3 (to round up)) / 4 - let secp_ctx = Secp256k1::new(); - let keys = $keysinterface.derive_channel_keys($chan_value, key_derivation_params.0, key_derivation_params.1); - let remotepubkey = keys.pubkeys().payment_point; - let witness_script = Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: remotepubkey}, Network::Testnet).script_pubkey(); - let sighash = Message::from_slice(&bip143::SigHashCache::new(&spend_tx).signature_hash(0, &witness_script, output.value, SigHashType::All)[..]).unwrap(); - let remotesig = secp_ctx.sign(&sighash, &keys.inner.payment_key); - spend_tx.input[0].witness.push(remotesig.serialize_der().to_vec()); - spend_tx.input[0].witness[0].push(SigHashType::All as u8); - spend_tx.input[0].witness.push(remotepubkey.serialize().to_vec()); - txn.push(spend_tx); - }, - SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref per_commitment_point, ref to_self_delay, ref output, ref key_derivation_params, ref revocation_pubkey } => { - let input = TxIn { - previous_output: outpoint.into_bitcoin_outpoint(), - script_sig: Script::new(), - sequence: *to_self_delay as u32, - 
witness: Vec::new(), - }; - let outp = TxOut { - script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), - value: output.value, - }; - let mut spend_tx = Transaction { - version: 2, - lock_time: 0, - input: vec![input], - output: vec![outp], - }; - let secp_ctx = Secp256k1::new(); - let keys = $keysinterface.derive_channel_keys($chan_value, key_derivation_params.0, key_derivation_params.1); - if let Ok(delayed_payment_key) = chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &keys.inner.delayed_payment_base_key) { - - let delayed_payment_pubkey = PublicKey::from_secret_key(&secp_ctx, &delayed_payment_key); - let witness_script = chan_utils::get_revokeable_redeemscript(revocation_pubkey, *to_self_delay, &delayed_payment_pubkey); - spend_tx.output[0].value -= (spend_tx.get_weight() + 2 + 1 + 73 + 1 + witness_script.len() + 1 + 3) as u64 / 4; // (Max weight + 3 (to round up)) / 4 - let sighash = Message::from_slice(&bip143::SigHashCache::new(&spend_tx).signature_hash(0, &witness_script, output.value, SigHashType::All)[..]).unwrap(); - let local_delayedsig = secp_ctx.sign(&sighash, &delayed_payment_key); - spend_tx.input[0].witness.push(local_delayedsig.serialize_der().to_vec()); - spend_tx.input[0].witness[0].push(SigHashType::All as u8); - spend_tx.input[0].witness.push(vec!()); //MINIMALIF - spend_tx.input[0].witness.push(witness_script.clone().into_bytes()); - } else { panic!() } - txn.push(spend_tx); - }, - SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => { - let secp_ctx = Secp256k1::new(); - let input = TxIn { - previous_output: outpoint.into_bitcoin_outpoint(), - script_sig: Script::new(), - sequence: 0, - witness: Vec::new(), - }; - let outp = TxOut { - script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), - value: output.value, - }; - let mut spend_tx = Transaction { - version: 2, - lock_time: 0, - input: vec![input], - output: vec![outp.clone()], - }; - spend_tx.output[0].value -= (spend_tx.get_weight() + 2 + 1 + 73 + 35 + 3) as u64 / 4; // (Max weight + 3 (to round up)) / 4 - let secret = { - match ExtendedPrivKey::new_master(Network::Testnet, &$node.node_seed) { - Ok(master_key) => { - match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx($der_idx).expect("key space exhausted")) { - Ok(key) => key, - Err(_) => panic!("Your RNG is busted"), - } - } - Err(_) => panic!("Your rng is busted"), - } - }; - let pubkey = ExtendedPubKey::from_private(&secp_ctx, &secret).public_key; - let witness_script = Address::p2pkh(&pubkey, Network::Testnet).script_pubkey(); - let sighash = Message::from_slice(&bip143::SigHashCache::new(&spend_tx).signature_hash(0, &witness_script, output.value, SigHashType::All)[..]).unwrap(); - let sig = secp_ctx.sign(&sighash, &secret.private_key.key); - spend_tx.input[0].witness.push(sig.serialize_der().to_vec()); - spend_tx.input[0].witness[0].push(SigHashType::All as u8); - spend_tx.input[0].witness.push(pubkey.key.serialize().to_vec()); - txn.push(spend_tx); - }, - } + Event::SpendableOutputs { mut outputs } => { + for outp in outputs.drain(..) 
{ + txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx).unwrap()); + all_outputs.push(outp); } }, _ => panic!("Unexpected event"), }; } + if all_outputs.len() > 1 { + if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx) { + txn.push(tx); + } + } txn } } @@ -4783,7 +4721,7 @@ fn test_claim_sizeable_push_msat() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known()); - nodes[1].node.force_close_channel(&chan.2); + nodes[1].node.force_close_channel(&chan.2).unwrap(); check_closed_broadcast!(nodes[1], false); check_added_monitors!(nodes[1], 1); let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -4810,7 +4748,7 @@ fn test_claim_on_remote_sizeable_push_msat() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known()); - nodes[0].node.force_close_channel(&chan.2); + nodes[0].node.force_close_channel(&chan.2).unwrap(); check_closed_broadcast!(nodes[0], false); check_added_monitors!(nodes[0], 1); @@ -4858,9 +4796,10 @@ fn test_claim_on_remote_revoked_sizeable_push_msat() { connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash()); let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000); - assert_eq!(spend_txn.len(), 2); + assert_eq!(spend_txn.len(), 3); check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx check_spends!(spend_txn[1], node_txn[0]); + check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs } #[test] @@ -4955,8 +4894,10 @@ fn test_static_spendable_outputs_timeout_tx() { expect_payment_failed!(nodes[1], our_payment_hash, true); let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000); - assert_eq!(spend_txn.len(), 2); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output + assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output + check_spends!(spend_txn[0], commitment_tx[0]); check_spends!(spend_txn[1], node_txn[0]); + check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs } #[test] @@ -5133,11 +5074,12 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() { // Check A's ChannelMonitor was able to generate the right spendable output descriptor let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 100000); - assert_eq!(spend_txn.len(), 2); + assert_eq!(spend_txn.len(), 3); assert_eq!(spend_txn[0].input.len(), 1); check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output); check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx + check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs } #[test] @@ -5370,6 +5312,7 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() { let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0; 
let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2); + assert_eq!(local_txn.len(), 1); assert_eq!(local_txn[0].input.len(), 1); check_spends!(local_txn[0], chan_1.3); @@ -5390,10 +5333,13 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() { } let node_txn = { let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + assert_eq!(node_txn.len(), 3); + assert_eq!(node_txn[0], node_txn[2]); + assert_eq!(node_txn[1], local_txn[0]); assert_eq!(node_txn[0].input.len(), 1); assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); check_spends!(node_txn[0], local_txn[0]); - vec![node_txn[0].clone(), node_txn[2].clone()] + vec![node_txn[0].clone()] }; let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }; @@ -5402,9 +5348,8 @@ fn test_dynamic_spendable_outputs_local_htlc_success_tx() { // Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000); - assert_eq!(spend_txn.len(), 2); + assert_eq!(spend_txn.len(), 1); check_spends!(spend_txn[0], node_txn[0]); - check_spends!(spend_txn[1], node_txn[1]); } fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) { @@ -5699,9 +5644,10 @@ fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() { // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 100000); - assert_eq!(spend_txn.len(), 2); + assert_eq!(spend_txn.len(), 3); check_spends!(spend_txn[0], local_txn[0]); check_spends!(spend_txn[1], htlc_timeout); + check_spends!(spend_txn[2], local_txn[0], htlc_timeout); } #[test] @@ -5769,9 +5715,10 @@ fn test_key_derivation_params() { // Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet); let spend_txn = check_spendable_outputs!(nodes[0], 1, new_keys_manager, 100000); - assert_eq!(spend_txn.len(), 2); + assert_eq!(spend_txn.len(), 3); check_spends!(spend_txn[0], local_txn_1[0]); check_spends!(spend_txn[1], htlc_timeout); + check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout); } #[test] @@ -7249,7 +7196,7 @@ fn test_upfront_shutdown_script() { let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id()); node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); // Test we enforce upfront_scriptpbukey if by providing a diffrent one at closing that we disconnect peer - nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown); + nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str())); check_added_monitors!(nodes[2], 1); @@ -7258,7 +7205,7 @@ fn test_upfront_shutdown_script() { nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); let node_0_shutdown = 
get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id()); // We test that in case of peer committing upfront to a script, if it oesn't change at closing, we sign - nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown); + nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); let events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { @@ -7272,7 +7219,7 @@ fn test_upfront_shutdown_script() { nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); let mut node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); node_1_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { @@ -7286,7 +7233,7 @@ fn test_upfront_shutdown_script() { nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); match events[0] { @@ -7300,7 +7247,70 @@ fn test_upfront_shutdown_script() { nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match events[0] { + MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } + match events[1] { + MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) } + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_upfront_shutdown_script_unsupport_segwit() { + // We test that channel is closed early + // if a segwit program is passed as upfront shutdown script, + // but the peer does not support segwit. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap(); + + let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + open_channel.shutdown_scriptpubkey = Present(Builder::new().push_int(16) + .push_slice(&[0, 0]) + .into_script()); + + let features = InitFeatures::known().clear_shutdown_anysegwit(); + nodes[0].node.handle_open_channel(&nodes[0].node.get_our_node_id(), features, &open_channel); + + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 1); + match events[0] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert!(regex::Regex::new(r"Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format. script: (\([A-Fa-f0-9]+\))").unwrap().is_match(&*msg.data)); + }, + _ => panic!("Unexpected event"), + } +} + +#[test] +fn test_shutdown_script_any_segwit_allowed() { + let mut config = UserConfig::default(); + config.channel_options.announced_channel = true; + config.peer_channel_config_limits.force_announced_channel_preference = false; + config.channel_options.commit_upfront_shutdown_pubkey = false; + let user_cfgs = [None, Some(config), None]; + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + //// We test if the remote peer accepts opt_shutdown_anysegwit, a witness program can be used on shutdown + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known()); + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + node_0_shutdown.scriptpubkey = Builder::new().push_int(16) + .push_slice(&[0, 0]) + .into_script(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); let events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); match events[0] { @@ -7313,6 +7323,73 @@ fn test_upfront_shutdown_script() { } } +#[test] +fn test_shutdown_script_any_segwit_not_allowed() { + let mut config = UserConfig::default(); + config.channel_options.announced_channel = true; + config.peer_channel_config_limits.force_announced_channel_preference = false; + config.channel_options.commit_upfront_shutdown_pubkey = false; + let user_cfgs = [None, Some(config), None]; + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + //// We test that if the remote peer does not accept opt_shutdown_anysegwit, the witness program cannot be used on shutdown + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known()); + nodes[1].node.close_channel(&OutPoint { 
txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + // Make an any segwit version script + node_0_shutdown.scriptpubkey = Builder::new().push_int(16) + .push_slice(&[0, 0]) + .into_script(); + let flags_no = InitFeatures::known().clear_shutdown_anysegwit(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &flags_no, &node_0_shutdown); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match events[1] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert_eq!(msg.data, "Got a nonstandard scriptpubkey (60020000) from remote peer".to_owned()) + }, + _ => panic!("Unexpected event"), + } + check_added_monitors!(nodes[0], 1); +} + +#[test] +fn test_shutdown_script_segwit_but_not_anysegwit() { + let mut config = UserConfig::default(); + config.channel_options.announced_channel = true; + config.peer_channel_config_limits.force_announced_channel_preference = false; + config.channel_options.commit_upfront_shutdown_pubkey = false; + let user_cfgs = [None, Some(config), None]; + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + //// We test that if shutdown any segwit is supported and we send a witness script with 0 version, this is not accepted + let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known()); + nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap(); + let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); + // Make a segwit script that is not a valid as any segwit + node_0_shutdown.scriptpubkey = Builder::new().push_int(0) + .push_slice(&[0, 0]) + .into_script(); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown); + let events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(events.len(), 2); + match events[1] { + MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => { + assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert_eq!(msg.data, "Got a nonstandard scriptpubkey (00020000) from remote peer".to_owned()) + }, + _ => panic!("Unexpected event"), + } + check_added_monitors!(nodes[0], 1); +} + #[test] fn test_user_configurable_csv_delay() { // We test our channel constructors yield errors when we pass them absurd csv delay @@ -7402,7 +7479,7 @@ fn test_data_loss_protect() { // Cache node A state before any channel update let previous_node_state = nodes[0].node.encode(); let mut previous_chain_monitor_state = test_utils::TestVecWriter(Vec::new()); - nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.write(&mut previous_chain_monitor_state).unwrap(); + nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut previous_chain_monitor_state).unwrap(); send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000); send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000); @@ -7412,7 +7489,7 @@ fn test_data_loss_protect() { // 
Restore node A from previous state logger = test_utils::TestLogger::with_id(format!("node {}", 0)); - let mut chain_monitor = <(BlockHash, ChannelMonitor)>::read(&mut ::std::io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1; + let mut chain_monitor = <(Option, ChannelMonitor)>::read(&mut ::std::io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1; chain_source = test_utils::TestChainSource::new(Network::Testnet); tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())}; fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 }; @@ -7421,7 +7498,7 @@ fn test_data_loss_protect() { node_state_0 = { let mut channel_monitors = HashMap::new(); channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chain_monitor); - <(BlockHash, ChannelManager)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs { + <(Option, ChannelManager)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs { keys_manager: keys_manager, fee_estimator: &fee_estimator, chain_monitor: &monitor, @@ -7484,7 +7561,7 @@ fn test_data_loss_protect() { let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42}; connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[0].clone()]}, 0); connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 0, true, header.block_hash()); - let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 100000); + let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 1000000); assert_eq!(spend_txn.len(), 1); check_spends!(spend_txn[0], node_txn[0]); } @@ -8080,7 +8157,7 @@ fn test_counterparty_raa_skip_no_crash() { // commitment transaction, we would have happily carried on and provided them the next // commitment transaction based on one RAA forward. This would probably eventually have led to // channel closure, but it would not have resulted in funds loss. Still, our - // EnforcingChannelKeys would have paniced as it doesn't like jumps into the future. Here, we + // EnforcingSigner would have paniced as it doesn't like jumps into the future. Here, we // check simply that the channel is closed in response to such an RAA, but don't check whether // we decide to punish our counterparty for revoking their funds (as we don't currently // implement that). 
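// ---------------------------------------------------------------------------------------------
// Illustrative aside (not part of the diff): test_counterparty_raa_skip_no_crash above deals
// with a counterparty revealing per-commitment secrets out of order. For reference, here is a
// minimal standalone sketch of the BOLT 3 per-commitment-secret derivation those secrets come
// from, assuming only the bitcoin_hashes sha256 API; the crate's own helper
// (cf. build_commitment_secret in ln::chan_utils) is the authoritative version.
use bitcoin::hashes::{sha256, Hash};

// Walk bits 47..0 of `idx`; for every set bit, flip that bit of the running 32-byte value and
// re-hash it. Commitment numbers count down from 2^48 - 1 (INITIAL_COMMITMENT_NUMBER in the
// test above), so "jumping into the future" means asking for an index whose secret should not
// have been revealed yet.
fn per_commitment_secret(commitment_seed: &[u8; 32], idx: u64) -> [u8; 32] {
	let mut res = *commitment_seed;
	for bit in (0..48u64).rev() {
		if (idx >> bit) & 1 == 1 {
			res[(bit / 8) as usize] ^= 1 << (bit & 7);
			res = sha256::Hash::hash(&res).into_inner();
		}
	}
	res
}
// ---------------------------------------------------------------------------------------------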
@@ -8091,7 +8168,7 @@ fn test_counterparty_raa_skip_no_crash() {
 	let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
 	let mut guard = nodes[0].node.channel_state.lock().unwrap();
-	let keys = &guard.by_id.get_mut(&channel_id).unwrap().holder_keys;
+	let keys = &guard.by_id.get_mut(&channel_id).unwrap().get_signer();
 	const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
 	let per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER); // Must revoke without gaps
@@ -8149,10 +8226,10 @@ fn test_bump_txn_sanitize_tracking_maps() {
 	connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn }, 130);
 	connect_blocks(&nodes[0], 5, 130, false, header_130.block_hash());
 	{
-		let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
+		let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
 		if let Some(monitor) = monitors.get(&OutPoint { txid: chan.3.txid(), index: 0 }) {
-			assert!(monitor.onchain_tx_handler.pending_claim_requests.is_empty());
-			assert!(monitor.onchain_tx_handler.claimable_outpoints.is_empty());
+			assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
+			assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
 		}
 	}
 }
@@ -8283,11 +8360,11 @@ fn test_update_err_monitor_lockdown() {
 	let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
 	let persister = test_utils::TestPersister::new();
 	let watchtower = {
-		let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
+		let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
 		let monitor = monitors.get(&outpoint).unwrap();
 		let mut w = test_utils::TestVecWriter(Vec::new());
 		monitor.write(&mut w).unwrap();
-		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read(
+		let new_monitor = <(Option, channelmonitor::ChannelMonitor)>::read(
 				&mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
 		assert!(new_monitor == *monitor);
 		let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
@@ -8342,11 +8419,11 @@ fn test_concurrent_monitor_claim() {
 	let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
 	let persister = test_utils::TestPersister::new();
 	let watchtower_alice = {
-		let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
+		let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
 		let monitor = monitors.get(&outpoint).unwrap();
 		let mut w = test_utils::TestVecWriter(Vec::new());
 		monitor.write(&mut w).unwrap();
-		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read(
+		let new_monitor = <(Option, channelmonitor::ChannelMonitor)>::read(
 				&mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
 		assert!(new_monitor == *monitor);
 		let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
@@ -8368,11 +8445,11 @@ fn test_concurrent_monitor_claim() {
 	let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
 	let persister = test_utils::TestPersister::new();
 	let watchtower_bob = {
-		let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
+		let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
 		let monitor = monitors.get(&outpoint).unwrap();
 		let mut w = test_utils::TestVecWriter(Vec::new());
 		monitor.write(&mut w).unwrap();
-		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>::read(
+		let new_monitor = <(Option, channelmonitor::ChannelMonitor)>::read(
 				&mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
 		assert!(new_monitor == *monitor);
 		let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
@@ -8553,7 +8630,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain
 	// responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
 	let mut force_closing_node = 0; // Alice force-closes
 	if !broadcast_alice { force_closing_node = 1; } // Bob force-closes
-	nodes[force_closing_node].node.force_close_channel(&chan_ab.2);
+	nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
 	check_closed_broadcast!(nodes[force_closing_node], false);
 	check_added_monitors!(nodes[force_closing_node], 1);
 	if go_onchain_before_fulfill {
@@ -8836,3 +8913,66 @@ fn test_duplicate_chan_id() {
 	update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
 	send_payment(&nodes[0], &[&nodes[1]], 8000000, 8_000_000);
 }
+
+#[test]
+fn test_error_chans_closed() {
+	// Test that we properly handle error messages, closing appropriate channels.
+	//
+	// Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
+	// peer. The "real" fix for that is to index channels with peers_ids, however in the meantime
+	// we can test various edge cases around it to ensure we don't regress.
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+	// Create some initial channels
+	let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+	let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+	let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+	assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
+	assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
+	assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
+
+	// Closing a channel from a different peer has no effect
+	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
+	assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
+
+	// Closing one channel doesn't impact others
+	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
+	check_added_monitors!(nodes[0], 1);
+	check_closed_broadcast!(nodes[0], false);
+	assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
+	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
+	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
+
+	// A null channel ID should close all channels
+	let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
+	check_added_monitors!(nodes[0], 2);
+	let events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 2);
+	match events[0] {
+		MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+			assert_eq!(msg.contents.flags & 2, 2);
+		},
+		_ => panic!("Unexpected event"),
+	}
+	match events[1] {
+		MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+			assert_eq!(msg.contents.flags & 2, 2);
+		},
+		_ => panic!("Unexpected event"),
+	}
+	// Note that at this point users of a standard PeerHandler will end up calling
+	// peer_disconnected with no_connection_possible set to false, duplicating the
+	// close-all-channels logic. That's OK, we don't want to end up not force-closing channels for
+	// users with their own peer handling logic. We duplicate the call here, however.
+	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
+
+	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+	assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+	assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
+}