X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=7572abff8313e90cdd149483a3ec5e919c864398;hb=79b55f081557f7df7fc27f6c39574904bdbd1737;hp=02ef5f37b9b2866c182e1773968002a8851b94b1;hpb=10071b51e2b7bafcef1ce958524bd70ef5c7ba5a;p=rust-lightning diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 02ef5f37..7572abff 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -13,6 +13,7 @@ use chain; use chain::{Confirm, Listen, Watch}; +use chain::chaininterface::LowerBoundedFeeEstimator; use chain::channelmonitor; use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY}; use chain::transaction::OutPoint; @@ -27,7 +28,7 @@ use routing::gossip::NetworkGraph; use routing::router::{PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route}; use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures}; use ln::msgs; -use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ErrorAction}; +use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction}; use util::enforcing_trait_impls::EnforcingSigner; use util::{byte_utils, test_utils}; use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason}; @@ -37,7 +38,7 @@ use util::config::UserConfig; use bitcoin::hash_types::BlockHash; use bitcoin::blockdata::block::{Block, BlockHeader}; -use bitcoin::blockdata::script::Builder; +use bitcoin::blockdata::script::{Builder, Script}; use bitcoin::blockdata::opcodes; use bitcoin::blockdata::constants::genesis_block; use bitcoin::network::constants::Network; @@ -1058,26 +1059,6 @@ fn fake_network_test() { fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2); claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1); - // Add a duplicate new channel from 2 to 4 - let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()); - - // Send some payments across both channels - let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0; - let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0; - let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0; - - - route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000); - let events = nodes[0].node.get_and_clear_pending_msg_events(); - assert_eq!(events.len(), 0); - nodes[0].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap(), 1); - - //TODO: Test that routes work again here as we've been notified that the channel is full - - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3); - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4); - claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5); - // Close down the channels... 
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true); check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure); @@ -1091,9 +1072,6 @@ fn fake_network_test() { close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false); check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure); - close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false); - check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure); - check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure); } #[test] @@ -1823,9 +1801,12 @@ fn test_channel_reserve_holding_cell_htlcs() { // attempt to send amt_msat > their_max_htlc_value_in_flight_msat { - let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_0); + let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id()) + .with_features(InvoiceFeatures::known()).with_max_channel_saturation_power_of_half(0); + let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0, TEST_FINAL_CLTV); route.paths[0].last_mut().unwrap().fee_msat += 1; assert!(route.paths[0].iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); + unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err }, assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err))); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1844,7 +1825,12 @@ fn test_channel_reserve_holding_cell_htlcs() { if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat { break; } - send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0); + + let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id()) + .with_features(InvoiceFeatures::known()).with_max_channel_saturation_power_of_half(0); + let route = get_route!(nodes[0], payment_params, recv_value_0, TEST_FINAL_CLTV).unwrap(); + let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0); + claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage); let (stat01_, stat11_, stat12_, stat22_) = ( get_channel_value_stat!(nodes[0], chan_1.2), @@ -2203,7 +2189,7 @@ fn channel_monitor_network_test() { send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000); // Simple case with no pending HTLCs: - nodes[1].node.force_close_channel(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap(); + nodes[1].node.force_close_broadcasting_latest_txn(&chan_1.2, &nodes[0].node.get_our_node_id()).unwrap(); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); { @@ -2224,7 +2210,7 @@ fn channel_monitor_network_test() { // Simple case of one pending HTLC to HTLC-Timeout (note that the HTLC-Timeout is not // broadcasted until we reach the timelock time). 
- nodes[1].node.force_close_channel(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap(); + nodes[1].node.force_close_broadcasting_latest_txn(&chan_2.2, &nodes[2].node.get_our_node_id()).unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); { @@ -2264,7 +2250,7 @@ fn channel_monitor_network_test() { // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2] // HTLC-Timeout and a nodes[3] claim against it (+ its own announces) - nodes[2].node.force_close_channel(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap(); + nodes[2].node.force_close_broadcasting_latest_txn(&chan_3.2, &nodes[3].node.get_our_node_id()).unwrap(); check_added_monitors!(nodes[2], 1); check_closed_broadcast!(nodes[2], true); let node2_commitment_txid; @@ -3403,13 +3389,13 @@ fn test_htlc_ignore_latest_remote_commitment() { create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); route_payment(&nodes[0], &[&nodes[1]], 10000000); - nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap(); connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 3); assert_eq!(node_txn[0], node_txn[1]); @@ -3466,7 +3452,7 @@ fn test_force_close_fail_back() { // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!). - nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap(); + nodes[2].node.force_close_broadcasting_latest_txn(&payment_event.commitment_msg.channel_id, &nodes[1].node.get_our_node_id()).unwrap(); check_closed_broadcast!(nodes[2], true); check_added_monitors!(nodes[2], 1); check_closed_event!(nodes[2], 1, ClosureReason::HolderForceClosed); @@ -3489,7 +3475,7 @@ fn test_force_close_fail_back() { // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success...
{ get_monitor!(nodes[2], payment_event.commitment_msg.channel_id) - .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &node_cfgs[2].fee_estimator, &node_cfgs[2].logger); + .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger); } mine_transaction(&nodes[2], &tx); let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap(); @@ -4793,7 +4779,7 @@ fn test_claim_sizeable_push_msat() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known()); - nodes[1].node.force_close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap(); + nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap(); check_closed_broadcast!(nodes[1], true); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); @@ -4822,12 +4808,12 @@ fn test_claim_on_remote_sizeable_push_msat() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 98_000_000, InitFeatures::known(), InitFeatures::known()); - nodes[0].node.force_close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap(); check_closed_broadcast!(nodes[0], true); check_added_monitors!(nodes[0], 1); check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(node_txn.len(), 1); check_spends!(node_txn[0], chan.3); assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis blocks us from doing so at channel opening @@ -5033,7 +5019,7 @@ fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() { check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires - let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(revoked_htlc_txn.len(), 2); check_spends!(revoked_htlc_txn[0], chan_1.3); assert_eq!(revoked_htlc_txn[1].input.len(), 1); @@ -7342,7 +7328,7 @@ fn test_user_configurable_csv_delay() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); // We test that config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound() - if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, + if let Err(error) = Channel::new_outbound(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), 1000000, 1000000, 0, &low_our_to_self_config, 0, 42) { @@ -7356,7 +7342,7 @@ fn test_user_configurable_csv_delay() { nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
open_channel.to_self_delay = 200; - if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, + if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &low_our_to_self_config, 0, &nodes[0].logger, 42) { @@ -7388,7 +7374,7 @@ fn test_user_configurable_csv_delay() { nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap(); let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id()); open_channel.to_self_delay = 200; - if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }, + if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }), &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0, &high_their_to_self_config, 0, &nodes[0].logger, 42) { @@ -7399,14 +7385,11 @@ fn test_user_configurable_csv_delay() { } else { assert!(false); } } -#[test] -fn test_data_loss_protect() { - // We want to be sure that : - // * we don't broadcast our Local Commitment Tx in case of fallen behind - // (but this is not quite true - we broadcast during Drop because chanmon is out of sync with chanmgr) - // * we close channel in case of detecting other being fallen behind - // * we are able to claim our own outputs thanks to to_remote being static - // TODO: this test is incomplete and the data_loss_protect implementation is incomplete - see issue #775 +fn do_test_data_loss_protect(reconnect_panicing: bool) { + // When we get a data_loss_protect proving we're behind, we immediately panic as the + // chain::Watch API requirements have been violated (e.g. the user restored from a backup). The + // panic message informs the user they should force-close without broadcasting, which is tested + // if `reconnect_panicing` is not set. let persister; let logger; let fee_estimator; @@ -7464,53 +7447,53 @@ fn test_data_loss_protect() { check_added_monitors!(nodes[0], 1); - nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); + if reconnect_panicing { + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None }); - let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); - // Check we don't broadcast any transactions following learning of per_commitment_point from B - nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]); - check_added_monitors!(nodes[0], 1); + // Check that we close the channel upon detecting that A has fallen behind. + // Check that we sent the warning message when we detected that A had fallen behind, + // giving A the possibility to recover from the warning.
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); + let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned(); + assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg)); + + { + let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); + // The node B should not broadcast the transaction to force close the channel! + assert!(node_txn.is_empty()); + } + + let reestablish_0 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + // Check A panics upon seeing proof it has fallen behind. + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_0[0]); + return; // By this point we should have panicked! + } + nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap(); + check_added_monitors!(nodes[0], 1); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed); { - let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); + let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 0); } - let mut reestablish_1 = Vec::with_capacity(1); for msg in nodes[0].node.get_and_clear_pending_msg_events() { - if let MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } = msg { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - reestablish_1.push(msg.clone()); - } else if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg { + if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg { } else if let MessageSendEvent::HandleError { ref action, .. } = msg { match action { &ErrorAction::SendErrorMessage { ref msg } => { - assert_eq!(msg.data, "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can't do any automated broadcasting"); + assert_eq!(msg.data, "Channel force-closed"); }, _ => panic!("Unexpected event!"), } } else { - panic!("Unexpected event") + panic!("Unexpected event {:?}", msg) } } - // Check we close channel detecting A is fallen-behind - // Check that we sent the warning message when we detected that A has fallen behind, - // and give the possibility for A to recover from the warning. - nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]); - let warn_msg = "Peer attempted to reestablish channel with a very old local commitment transaction".to_owned(); - assert!(check_warn_msg!(nodes[1], nodes[0].node.get_our_node_id(), chan.2).contains(&warn_msg)); - - // Check A is able to claim to_remote output - let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); - // The node B should not broadcast the transaction to force close the channel! - assert!(node_txn.is_empty()); - // B should now detect that there is something wrong and should force close the channel. - let exp_err = "We have fallen behind - we have received proof that if we broadcast remote is going to claim our funds - we can\'t do any automated broadcasting"; - check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: exp_err.to_string() }); - // after the warning message sent by B, we should not be able to // use the channel, or successfully reconnect to the channel.
assert!(nodes[0].node.list_usable_channels().is_empty()); @@ -7541,6 +7524,17 @@ fn test_data_loss_protect() { check_closed_broadcast!(nodes[1], false); } +#[test] +#[should_panic] +fn test_data_loss_protect_showing_stale_state_panics() { + do_test_data_loss_protect(true); +} + +#[test] +fn test_force_close_without_broadcast() { + do_test_data_loss_protect(false); +} + #[test] fn test_check_htlc_underpaying() { // Send payment through A -> B but A is maliciously @@ -7830,7 +7824,7 @@ fn test_bump_penalty_txn_on_revoked_htlcs() { check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed); connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above) - let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); + let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(revoked_htlc_txn.len(), 3); check_spends!(revoked_htlc_txn[1], chan.3); @@ -8091,22 +8085,26 @@ fn test_counterparty_raa_skip_no_crash() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let mut guard = nodes[0].node.channel_state.lock().unwrap(); - let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer(); + let per_commitment_secret; + let next_per_commitment_point; + { + let mut guard = nodes[0].node.channel_state.lock().unwrap(); + let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer(); - const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; + const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; - // Make signer believe we got a counterparty signature, so that it allows the revocation - keys.get_enforcement_state().last_holder_commitment -= 1; - let per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER); + // Make signer believe we got a counterparty signature, so that it allows the revocation + keys.get_enforcement_state().last_holder_commitment -= 1; + per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER); - // Must revoke without gaps - keys.get_enforcement_state().last_holder_commitment -= 1; - keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1); + // Must revoke without gaps + keys.get_enforcement_state().last_holder_commitment -= 1; + keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1); - keys.get_enforcement_state().last_holder_commitment -= 1; - let next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(), - &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap()); + keys.get_enforcement_state().last_holder_commitment -= 1; + next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(), + &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap()); + } nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point }); @@ -8314,19 +8312,19 @@ fn test_channel_update_has_correct_htlc_maximum_msat() { // Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as // that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`. 
- assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_50_percent_msat)); + assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat); // Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as // that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`. - assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_30_percent_msat)); + assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat); // Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as // the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%) exceeds 90% of the // `channel_value`. - assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat)); + assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat); // Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as // the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%) exceeds 90% of the // `channel_value`. - assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat)); + assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat); } #[test] @@ -8365,7 +8363,7 @@ fn test_manually_accept_inbound_channel_request() { _ => panic!("Unexpected event"), } - nodes[1].node.force_close_channel(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap(); + nodes[1].node.force_close_broadcasting_latest_txn(&temp_channel_id, &nodes[0].node.get_our_node_id()).unwrap(); let close_msg_ev = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(close_msg_ev.len(), 1); @@ -8400,7 +8398,7 @@ fn test_manually_reject_inbound_channel_request() { let events = nodes[1].node.get_and_clear_pending_events(); match events[0] { Event::OpenChannelRequest { temporary_channel_id, .. } => { - nodes[1].node.force_close_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap(); + nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap(); } _ => panic!("Unexpected event"), } @@ -8448,12 +8446,12 @@ fn test_reject_funding_before_inbound_channel_accepted() { // `MessageSendEvent::SendAcceptChannel` event. The message is passed to `nodes[0]`'s // `handle_accept_channel`, which is required in order for `create_funding_transaction` to // succeed when `nodes[0]` is passed to it.
- { + let accept_chan_msg = { let mut lock; let channel = get_channel_ref!(&nodes[1], lock, temp_channel_id); - let accept_chan_msg = channel.get_accept_channel_message(); - nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg); - } + channel.get_accept_channel_message() + }; + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg); let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); @@ -9053,7 +9051,7 @@ fn do_test_onchain_htlc_settlement_after_close(broadcast_alice: bool, go_onchain force_closing_node = 1; counterparty_node = 0; } - nodes[force_closing_node].node.force_close_channel(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap(); + nodes[force_closing_node].node.force_close_broadcasting_latest_txn(&chan_ab.2, &nodes[counterparty_node].node.get_our_node_id()).unwrap(); check_closed_broadcast!(nodes[force_closing_node], true); check_added_monitors!(nodes[force_closing_node], 1); check_closed_event!(nodes[force_closing_node], 1, ClosureReason::HolderForceClosed); @@ -9416,6 +9414,10 @@ fn test_invalid_funding_tx() { // funding transactions from their counterparties, leading to a multi-implementation critical // security vulnerability (though we always sanitized properly, we've previously had // un-released crashes in the sanitization process). + // + // Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd + // previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and + // gave up on it. We test this here by generating such a transaction. let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -9426,9 +9428,19 @@ fn test_invalid_funding_tx() { nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())); let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42); + + // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is + // 136 bytes long. This matches our "accepted HTLC preimage spend" matching, previously causing + // a panic as we'd try to extract a 32 byte preimage from a witness element without checking + // its length. + let mut wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program(); + assert!(chan_utils::HTLCType::scriptlen_to_htlctype(wit_program.len()).unwrap() == + chan_utils::HTLCType::AcceptedHTLC); + + let wit_program_script: Script = wit_program.clone().into(); for output in tx.output.iter_mut() { // Make the confirmed funding transaction have a bogus script_pubkey - output.script_pubkey = bitcoin::Script::new(); + output.script_pubkey = Script::new_v0_p2wsh(&wit_program_script.wscript_hash()); } nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap(); @@ -9458,6 +9470,28 @@ fn test_invalid_funding_tx() { } else { panic!(); } } else { panic!(); } assert_eq!(nodes[1].node.list_channels().len(), 0); + + // Now confirm a spend of the (bogus) funding transaction.
As long as the witness is 5 elements + // long, the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking + // as it's not 32 bytes long. + let mut spend_tx = Transaction { + version: 2i32, lock_time: 0, + input: tx.output.iter().enumerate().map(|(idx, _)| TxIn { + previous_output: BitcoinOutPoint { + txid: tx.txid(), + vout: idx as u32, + }, + script_sig: Script::new(), + sequence: 0xfffffffd, + witness: Witness::from_vec(channelmonitor::deliberately_bogus_accepted_htlc_witness()) + }).collect(), + output: vec![TxOut { + value: 1000, + script_pubkey: Script::new(), + }] + }; + check_spends!(spend_tx, tx); + mine_transaction(&nodes[1], &spend_tx); } fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) { @@ -9489,7 +9523,7 @@ fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_t nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), false); nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); - nodes[1].node.force_close_channel(&channel_id, &nodes[2].node.get_our_node_id()).unwrap(); + nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap(); check_closed_broadcast!(nodes[1], true); check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); check_added_monitors!(nodes[1], 1); @@ -9866,7 +9900,7 @@ fn test_keysend_payments_to_public_node() { }; let scorer = test_utils::TestScorer::with_penalty(0); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); - let route = find_route(&payer_pubkey, &route_params, &network_graph.read_only(), None, nodes[0].logger, &scorer, &random_seed_bytes).unwrap(); + let route = find_route(&payer_pubkey, &route_params, &network_graph, None, nodes[0].logger, &scorer, &random_seed_bytes).unwrap(); let test_preimage = PaymentPreimage([42; 32]); let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(test_preimage)).unwrap(); @@ -9902,8 +9936,8 @@ fn test_keysend_payments_to_private_node() { let scorer = test_utils::TestScorer::with_penalty(0); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); let route = find_route( - &payer_pubkey, &route_params, &network_graph.read_only(), - Some(&first_hops.iter().collect::<Vec<_>>()), nodes[0].logger, &scorer, &random_seed_bytes + &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()), + nodes[0].logger, &scorer, &random_seed_bytes ).unwrap(); let test_preimage = PaymentPreimage([42; 32]);
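A note on the API change exercised throughout this diff: the single `ChannelManager::force_close_channel` entry point is replaced by two explicit methods, `force_close_broadcasting_latest_txn` (a normal unilateral close, broadcasting the holder's latest commitment transaction) and `force_close_without_broadcasting_txn` (for when local channel state may be stale, e.g. after restoring from an old backup, where broadcasting a revoked commitment transaction would let the peer claim our funds via the revocation path). A minimal usage sketch in the style of the test harness above; the `nodes`/`chan` fixture is the usual functional-test setup and is assumed, not shown:

    // Normal unilateral close: broadcast our latest local commitment transaction.
    nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();

    // Potentially-stale local state (e.g. restored from backup): close the channel
    // without broadcasting, waiting instead for the counterparty's commitment
    // transaction to appear on-chain so the static to_remote output can be claimed.
    nodes[0].node.force_close_without_broadcasting_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();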