use chain;
use chain::{Confirm, Listen, Watch};
+use chain::chaininterface::LowerBoundedFeeEstimator;
use chain::channelmonitor;
use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use chain::transaction::OutPoint;
use routing::router::{PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route};
use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ErrorAction};
+use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use util::enforcing_trait_impls::EnforcingSigner;
use util::{byte_utils, test_utils};
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use util::errors::APIError;
use util::ser::{Writeable, ReadableArgs};
use util::config::UserConfig;
use bitcoin::hash_types::BlockHash;
use bitcoin::blockdata::block::{Block, BlockHeader};
-use bitcoin::blockdata::script::Builder;
+use bitcoin::blockdata::script::{Builder, Script};
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
use prelude::*;
use alloc::collections::BTreeSet;
use core::default::Default;
+use core::iter::repeat;
use sync::{Arc, Mutex};
use ln::functional_test_utils::*;
fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
- // Add a duplicate new channel from 2 to 4
- let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
-
- // Send some payments across both channels
- let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
- let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
- let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
-
-
- route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 0);
- nodes[0].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap(), 1);
-
- //TODO: Test that routes work again here as we've been notified that the channel is full
-
- claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
- claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
- claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
-
// Close down the channels...
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
- close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
}
#[test]
// We have to forward pending HTLCs twice - once tries to forward the payment forward (and
// fails), the second will process the resulting failure and fail the HTLC backward.
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
// attempt to send amt_msat > their_max_htlc_value_in_flight_msat
{
- let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], recv_value_0);
+ let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id())
+ .with_features(InvoiceFeatures::known()).with_max_channel_saturation_power_of_half(0);
+ let (mut route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], payment_params, recv_value_0, TEST_FINAL_CLTV);
route.paths[0].last_mut().unwrap().fee_msat += 1;
assert!(route.paths[0].iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
+
unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)), true, APIError::ChannelUnavailable { ref err },
assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
if stat01.value_to_self_msat < stat01.channel_reserve_msat + commit_tx_fee_all_htlcs + ensure_htlc_amounts_above_dust_buffer + amt_msat {
break;
}
- send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
+
+ let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id())
+ .with_features(InvoiceFeatures::known()).with_max_channel_saturation_power_of_half(0);
+ let route = get_route!(nodes[0], payment_params, recv_value_0, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage, ..) = send_along_route(&nodes[0], route, &[&nodes[1], &nodes[2]], recv_value_0);
+ claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
let (stat01_, stat11_, stat12_, stat22_) = (
get_channel_value_stat!(nodes[0], chan_1.2),
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let mut events = nodes[0].node.get_and_clear_pending_events();
expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
- match events[1] {
+ match events.last().unwrap() {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
_ => panic!("Unexpected event"),
}
check_spends!(commitment_tx[0], chan_2.3);
nodes[2].node.fail_htlc_backwards(&payment_hash);
check_added_monitors!(nodes[2], 0);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
check_added_monitors!(nodes[2], 1);
let events = nodes[2].node.get_and_clear_pending_msg_events();
}
}
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
nodes[2].node.fail_htlc_backwards(&first_payment_hash);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
// Drop the last RAA from 3 -> 2
nodes[2].node.fail_htlc_backwards(&second_payment_hash);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
check_added_monitors!(nodes[2], 1);
nodes[2].node.fail_htlc_backwards(&third_payment_hash);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
// commitment transaction for nodes[0] until process_pending_htlc_forwards().
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
+ assert_eq!(events.len(), 2);
match events[0] {
Event::PendingHTLCsForwardable { .. } => { },
_ => panic!("Unexpected event"),
};
+ match events[1] {
+ Event::HTLCHandlingFailed { .. } => { },
+ _ => panic!("Unexpected event"),
+ }
// Deliberately don't process the pending fail-back so they all fail back at once after
// block connection just like the !deliver_bs_raa case
}
assert!(ANTI_REORG_DELAY > PAYMENT_EXPIRY_BLOCKS); // We assume payments will also expire
let events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 4 });
+ assert_eq!(events.len(), if deliver_bs_raa { 2 + (nodes.len() - 1) } else { 4 + nodes.len() });
match events[0] {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
_ => panic!("Unexepected event"),
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 3);
assert_eq!(node_txn[0], node_txn[1]);
// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
{
get_monitor!(nodes[2], payment_event.commitment_msg.channel_id)
- .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &node_cfgs[2].fee_estimator, &node_cfgs[2].logger);
+ .provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &LowerBoundedFeeEstimator::new(node_cfgs[2].fee_estimator), &node_cfgs[2].logger);
}
mine_transaction(&nodes[2], &tx);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
connect_block(&nodes[1], &block);
}
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
check_added_monitors!(nodes[1], 1);
let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
connect_blocks(&nodes[1], 1);
if forwarded_htlc {
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(fail_commit.len(), 1);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan.3);
assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of to_remote output as channel_reserve_satoshis block us to do so at channel opening
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
- let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(revoked_htlc_txn.len(), 2);
check_spends!(revoked_htlc_txn[0], chan_1.3);
assert_eq!(revoked_htlc_txn[1].input.len(), 1);
mine_transaction(&nodes[1], &htlc_timeout_tx);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(htlc_updates.update_add_htlcs.is_empty());
assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
&[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
- let chan = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
+ let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
+ let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+ let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
+ let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
+ let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
// Rebalance and check output sanity...
send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
- assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 2);
+ assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
- let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
+ let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis;
// 0th HTLC:
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
// 1st HTLC:
// Double-check that six of the new HTLC were added
// We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
// with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included).
- assert_eq!(get_local_commitment_txn!(nodes[3], chan.2).len(), 1);
- assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 8);
+ assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
+ assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
// Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
// Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
nodes[4].node.fail_htlc_backwards(&payment_hash_5);
nodes[4].node.fail_htlc_backwards(&payment_hash_6);
check_added_monitors!(nodes[4], 0);
- expect_pending_htlcs_forwardable!(nodes[4]);
+
+ let failed_destinations = vec![
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
check_added_monitors!(nodes[4], 1);
let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
nodes[5].node.fail_htlc_backwards(&payment_hash_2);
nodes[5].node.fail_htlc_backwards(&payment_hash_4);
check_added_monitors!(nodes[5], 0);
- expect_pending_htlcs_forwardable!(nodes[5]);
+
+ let failed_destinations_2 = vec![
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
check_added_monitors!(nodes[5], 1);
let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
- let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
-
- expect_pending_htlcs_forwardable!(nodes[3]);
+ let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
+
+ // After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 PaymentForwardedFailed events
+ let failed_destinations_3 = vec![
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
check_added_monitors!(nodes[3], 1);
let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
//
// Alternatively, we may broadcast the previous commitment transaction, which should only
// result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
- let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
+ let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
if announce_latest {
mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
}
let events = nodes[2].node.get_and_clear_pending_events();
let close_event = if deliver_last_raa {
- assert_eq!(events.len(), 2);
- events[1].clone()
+ assert_eq!(events.len(), 2 + 6);
+ events.last().clone().unwrap()
} else {
assert_eq!(events.len(), 1);
- events[0].clone()
+ events.last().clone().unwrap()
};
match close_event {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
check_closed_broadcast!(nodes[2], true);
if deliver_last_raa {
expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+
+ let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
+ expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
} else {
- expect_pending_htlcs_forwardable!(nodes[2]);
+ let expected_destinations: Vec<HTLCDestination> = if announce_latest {
+ repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
+ } else {
+ repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
+ };
+
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
}
check_added_monitors!(nodes[2], 3);
let htlc_value = if use_dust { 50000 } else { 3000000 };
let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
nodes[1].node.fail_htlc_backwards(&our_payment_hash);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
check_added_monitors!(nodes[1], 1);
let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
// nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(process_htlc_forwards_event.len(), 1);
+ assert_eq!(process_htlc_forwards_event.len(), 2);
match &process_htlc_forwards_event[0] {
&Event::PendingHTLCsForwardable { .. } => {},
_ => panic!("Unexpected event"),
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
+ let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events_4.len(), 1);
// Fail one HTLC to prune it in the will-be-latest-local commitment tx
nodes[1].node.fail_htlc_backwards(&payment_hash_2);
check_added_monitors!(nodes[1], 0);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
check_added_monitors!(nodes[1], 1);
let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
- if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
+ if let Err(error) = Channel::new_outbound(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
&nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), 1000000, 1000000, 0,
&low_our_to_self_config, 0, 42)
{
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
open_channel.to_self_delay = 200;
- if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
+ if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
&nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0,
&low_our_to_self_config, 0, &nodes[0].logger, 42)
{
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
open_channel.to_self_delay = 200;
- if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) },
+ if let Err(error) = Channel::new_from_req(&LowerBoundedFeeEstimator::new(&test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }),
&nodes[0].keys_manager, nodes[1].node.get_our_node_id(), &InitFeatures::known(), &open_channel, 0,
&high_their_to_self_config, 0, &nodes[0].logger, 42)
{
// Note that we first have to wait a random delay before processing the receipt of the HTLC,
// and then will wait a second random delay before failing the HTLC back:
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
// Node 3 is expecting payment of 100_000 but received 10_000,
// it should fail htlc like we didn't know the preimage.
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
- let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(revoked_htlc_txn.len(), 3);
check_spends!(revoked_htlc_txn[1], chan.3);
connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
let events = nodes[0].node.get_and_clear_pending_events();
expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
- match events[1] {
+ match events.last().unwrap() {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
_ => panic!("Unexpected event"),
}
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
- let mut guard = nodes[0].node.channel_state.lock().unwrap();
- let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer();
+ let per_commitment_secret;
+ let next_per_commitment_point;
+ {
+ let mut guard = nodes[0].node.channel_state.lock().unwrap();
+ let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer();
- const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
+ const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
- // Make signer believe we got a counterparty signature, so that it allows the revocation
- keys.get_enforcement_state().last_holder_commitment -= 1;
- let per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
+ // Make signer believe we got a counterparty signature, so that it allows the revocation
+ keys.get_enforcement_state().last_holder_commitment -= 1;
+ per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
- // Must revoke without gaps
- keys.get_enforcement_state().last_holder_commitment -= 1;
- keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
+ // Must revoke without gaps
+ keys.get_enforcement_state().last_holder_commitment -= 1;
+ keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
- keys.get_enforcement_state().last_holder_commitment -= 1;
- let next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
- &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+ keys.get_enforcement_state().last_holder_commitment -= 1;
+ next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
+ &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+ }
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
&msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
// Lock HTLC in both directions
- let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
- route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000).0;
+ let (payment_preimage_1, _, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
+ let (_, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
assert_eq!(revoked_local_txn[0].input.len(), 1);
assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
// Revoke local commitment tx
- claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
// Broadcast set of revoked txn on A
connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
- expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
mine_transaction(&nodes[0], &revoked_local_txn[0]);
// Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
// that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
- assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_50_percent_msat));
+ assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
// Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
// that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
- assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_30_percent_msat));
+ assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
// Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
// the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
// `channel_value`.
- assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
+ assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
// Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
// the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
// `channel_value`.
- assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
+ assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
}
#[test]
// `MessageSendEvent::SendAcceptChannel` event. The message is passed to `nodes[0]`
// `handle_accept_channel`, which is required in order for `create_funding_transaction` to
// succeed when `nodes[0]` is passed to it.
- {
+ let accept_chan_msg = {
let mut lock;
let channel = get_channel_ref!(&nodes[1], lock, temp_channel_id);
- let accept_chan_msg = channel.get_accept_channel_message();
- nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
- }
+ channel.get_accept_channel_message()
+ };
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
// All the below cases should end up being handled exactly identically, so we macro the
// resulting events.
macro_rules! handle_unknown_invalid_payment_data {
- () => {
+ ($payment_hash: expr) => {
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
let payment_event = SendEvent::from_event(events.pop().unwrap());
// We have to forward pending HTLCs once to process the receipt of the HTLC and then
// again to process the pending backwards-failure of the HTLC
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
check_added_monitors!(nodes[1], 1);
// We should fail the payment back
// Send a payment with the right payment hash but the wrong payment secret
nodes[0].node.send_payment(&route, our_payment_hash, &Some(random_payment_secret)).unwrap();
- handle_unknown_invalid_payment_data!();
+ handle_unknown_invalid_payment_data!(our_payment_hash);
expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
// Send a payment with a random payment hash, but the right payment secret
nodes[0].node.send_payment(&route, random_payment_hash, &Some(our_payment_secret)).unwrap();
- handle_unknown_invalid_payment_data!();
+ handle_unknown_invalid_payment_data!(random_payment_hash);
expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
// Send a payment with a random payment hash and random payment secret
nodes[0].node.send_payment(&route, random_payment_hash, &Some(random_payment_secret)).unwrap();
- handle_unknown_invalid_payment_data!();
+ handle_unknown_invalid_payment_data!(random_payment_hash);
expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
}
// funding transactions from their counterparties, leading to a multi-implementation critical
// security vulnerability (though we always sanitized properly, we've previously had
// un-released crashes in the sanitization process).
+ //
+ // Further, if the funding transaction is consensus-valid, confirms, and is later spent, we'd
+ // previously have crashed in `ChannelMonitor` even though we closed the channel as bogus and
+ // gave up on it. We test this here by generating such a transaction.
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
let (temporary_channel_id, mut tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
+
+ // Create a witness program which can be spent by a 4-empty-stack-elements witness and which is
+ // 136 bytes long. This matches our "accepted HTLC preimage spend" matching, previously causing
+ // a panic as we'd try to extract a 32 byte preimage from a witness element without checking
+ // its length.
+ let mut wit_program: Vec<u8> = channelmonitor::deliberately_bogus_accepted_htlc_witness_program();
+ assert!(chan_utils::HTLCType::scriptlen_to_htlctype(wit_program.len()).unwrap() ==
+ chan_utils::HTLCType::AcceptedHTLC);
+
+ let wit_program_script: Script = wit_program.clone().into();
for output in tx.output.iter_mut() {
// Make the confirmed funding transaction have a bogus script_pubkey
- output.script_pubkey = bitcoin::Script::new();
+ output.script_pubkey = Script::new_v0_p2wsh(&wit_program_script.wscript_hash());
}
nodes[0].node.funding_transaction_generated_unchecked(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone(), 0).unwrap();
} else { panic!(); }
} else { panic!(); }
assert_eq!(nodes[1].node.list_channels().len(), 0);
+
+ // Now confirm a spend of the (bogus) funding transaction. As long as the witness is 5 elements
+ // long the ChannelMonitor will try to read 32 bytes from the second-to-last element, panicking
+ // as it's not 32 bytes long.
+ let mut spend_tx = Transaction {
+ version: 2i32, lock_time: 0,
+ input: tx.output.iter().enumerate().map(|(idx, _)| TxIn {
+ previous_output: BitcoinOutPoint {
+ txid: tx.txid(),
+ vout: idx as u32,
+ },
+ script_sig: Script::new(),
+ sequence: 0xfffffffd,
+ witness: Witness::from_vec(channelmonitor::deliberately_bogus_accepted_htlc_witness())
+ }).collect(),
+ output: vec![TxOut {
+ value: 1000,
+ script_pubkey: Script::new(),
+ }]
+ };
+ check_spends!(spend_tx, tx);
+ mine_transaction(&nodes[1], &spend_tx);
}
fn do_test_tx_confirmed_skipping_blocks_immediate_broadcast(test_height_before_timelock: bool) {
// additional block built on top of the current chain.
nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
&nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
// Now we go fail back the first HTLC from the user end.
nodes[1].node.fail_htlc_backwards(&our_payment_hash);
- expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ let expected_destinations = vec![
+ HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
+ HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
nodes[1].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[1], 1);
if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
} else {
// Let the second HTLC fail and claim the first
- expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
nodes[1].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[1], 1);
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
.with_features(InvoiceFeatures::known());
}
expect_pending_htlcs_forwardable_ignore!(nodes[3]);
nodes[3].node.process_pending_htlc_forwards();
- expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
nodes[3].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[3], 1);
nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
check_added_monitors!(nodes[2], 1);
let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
connect_blocks(&nodes[3], TEST_FINAL_CLTV);
connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
- expect_pending_htlcs_forwardable!(nodes[3]);
+ let failed_destinations = vec![
+ HTLCDestination::FailedPayment { payment_hash },
+ HTLCDestination::FailedPayment { payment_hash },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);