use std::time::Instant;
use std::mem;
+const CHAN_CONFIRM_DEPTH: u32 = 100;
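+// confirm_transaction below connects blocks at heights 1..CHAN_CONFIRM_DEPTH so the
+// given tx ends up deeply confirmed. A sketch of a call site (hypothetical names):
+//   confirm_transaction(&node.chain_monitor, &funding_tx, chan_id);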
fn confirm_transaction(chain: &chaininterface::ChainWatchInterfaceUtil, tx: &Transaction, chan_id: u32) {
assert!(chain.does_match_tx(tx));
let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
chain.block_connected_checked(&header, 1, &[tx; 1], &[chan_id; 1]);
- for i in 2..100 {
+ for i in 2..CHAN_CONFIRM_DEPTH {
header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
chain.block_connected_checked(&header, i, &[tx; 0], &[0; 0]);
}
fn drop(&mut self) {
if !::std::thread::panicking() {
// Check that we processed all pending events
- assert_eq!(self.node.get_and_clear_pending_msg_events().len(), 0);
- assert_eq!(self.node.get_and_clear_pending_events().len(), 0);
- assert_eq!(self.chan_monitor.added_monitors.lock().unwrap().len(), 0);
+ assert!(self.node.get_and_clear_pending_msg_events().is_empty());
+ assert!(self.node.get_and_clear_pending_events().is_empty());
+ assert!(self.chan_monitor.added_monitors.lock().unwrap().is_empty());
}
}
}
}
}
+macro_rules! check_closed_broadcast {
+ ($node: expr) => {{
+ let events = $node.node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+ assert_eq!(msg.contents.flags & 2, 2);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }}
+}
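+// Usage sketch: right after a close is detected on-chain (or forced), e.g.
+//   nodes[1].node.force_close_channel(&chan_id);
+//   check_closed_broadcast!(nodes[1]);
+// asserts that exactly one disabled-channel (flags & 2) update is broadcast.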
+
fn close_channel(outbound_node: &Node, inbound_node: &Node, channel_id: &[u8; 32], funding_tx: Transaction, close_inbound_first: bool) -> (msgs::ChannelUpdate, msgs::ChannelUpdate, Transaction) {
let (node_a, broadcaster_a, struct_a) = if close_inbound_first { (&inbound_node.node, &inbound_node.tx_broadcaster, inbound_node) } else { (&outbound_node.node, &outbound_node.tx_broadcaster, outbound_node) };
let (node_b, broadcaster_b) = if close_inbound_first { (&outbound_node.node, &outbound_node.tx_broadcaster) } else { (&inbound_node.node, &inbound_node.tx_broadcaster) };
{
let (extra_msg_option, bs_revoke_and_ack) = commitment_signed_dance!($node_a, $node_b, (), $fail_backwards, true, true, true);
$node_a.node.handle_revoke_and_ack(&$node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
- {
- let mut added_monitors = $node_a.chan_monitor.added_monitors.lock().unwrap();
- if $fail_backwards {
- assert_eq!(added_monitors.len(), 2);
- assert!(added_monitors[0].0 != added_monitors[1].0);
- } else {
- assert_eq!(added_monitors.len(), 1);
- }
- added_monitors.clear();
- }
+ check_added_monitors!($node_a, 1);
extra_msg_option
}
};
{
commitment_signed_dance!($node_a, $node_b, $commitment_signed, $fail_backwards, true);
if $fail_backwards {
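+ // expect_pending_htlcs_forwardable! (defined elsewhere in this file) drains the
+ // single PendingHTLCsForwardable event and runs process_pending_htlc_forwards(),
+ // as done manually in do_test_commitment_revoked_fail_backward_exhaustive below.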
+ expect_pending_htlcs_forwardable!($node_a);
+ check_added_monitors!($node_a, 1);
+
let channel_state = $node_a.node.channel_state.lock().unwrap();
assert_eq!(channel_state.pending_msg_events.len(), 1);
if let MessageSendEvent::UpdateHTLCs { ref node_id, .. } = channel_state.pending_msg_events[0] {
}}
}
-fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
- let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node);
-
+fn send_along_route_with_hash(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64, our_payment_hash: PaymentHash) {
let mut payment_event = {
origin_node.node.send_payment(route, our_payment_hash).unwrap();
check_added_monitors!(origin_node, 1);
prev_node = node;
}
+}
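+
+// send_along_route stays as a thin wrapper; splitting out send_along_route_with_hash
+// lets tests re-use one payment hash across several routes (see the duplicate-hash
+// HTLCs in do_test_fail_backwards_unrevoked_remote_announce below).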
+fn send_along_route(origin_node: &Node, route: Route, expected_route: &[&Node], recv_value: u64) -> (PaymentPreimage, PaymentHash) {
+ let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(origin_node);
+ send_along_route_with_hash(origin_node, route, expected_route, recv_value, our_payment_hash);
(our_payment_preimage, our_payment_hash)
}
fn fail_payment_along_route(origin_node: &Node, expected_route: &[&Node], skip_last: bool, our_payment_hash: PaymentHash) {
assert!(expected_route.last().unwrap().node.fail_htlc_backwards(&our_payment_hash, 0));
+ expect_pending_htlcs_forwardable!(expected_route.last().unwrap());
check_added_monitors!(expected_route.last().unwrap(), 1);
let mut next_msgs: Option<(msgs::UpdateFailHTLC, msgs::CommitmentSigned)> = None;
{
$node.node.handle_update_fail_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0).unwrap();
commitment_signed_dance!($node, $prev_node, next_msgs.as_ref().unwrap().1, !$last_node);
+ if skip_last && $last_node {
+ expect_pending_htlcs_forwardable!($node);
+ }
}
}
}
// get_closing_signed_broadcast usually eats the BroadcastChannelUpdate for us and
// checks it, but in this case nodes[0] never got a chance to receive a
// closing_signed, so we do it ourselves
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
- assert_eq!(msg.contents.flags & 2, 2);
- },
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[0]);
}
assert!(nodes[0].node.list_channels().is_empty());
// If we send a garbage message, the channel should get closed, making the rest of this test case fail.
assert_eq!(nodes[1].node.list_channels().len(), 1);
assert_eq!(nodes[1].node.list_channels().len(), 1);
- let channel_close_broadcast = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(channel_close_broadcast.len(), 1);
- match channel_close_broadcast[0] {
- MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
- assert_eq!(msg.contents.flags & 2, 2);
- },
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[1]);
return;
}
}
// Test that in case of a unilateral close on-chain, we detect the state of the output via
// ChainWatchInterface and pass the preimage backwards accordingly. So here we test that
// ChannelManager broadcasts the right events to the other nodes in the payment path.
+ // We test with two HTLCs simultaneously as that was not handled correctly in the past.
// A --------------------> B ----------------------> C (preimage)
- // First, C should claim the HTLC output via HTLC-Success when its own latest local
+ // First, C should claim the HTLC outputs via HTLC-Success when its own latest local
// commitment transaction was broadcast.
// Then, B should learn the preimage from said transactions and attempt to claim
// backwards towards A.
// B should be able to claim via preimage if A then broadcasts its local tx.
// Finally, when A sees B's latest local commitment transaction it should be able to claim
- // the HTLC output via the preimage it learned (which, once confirmed should generate a
+ // the HTLC outputs via the preimage it learned (which, once confirmed, should generate a
// PaymentSent event).
let nodes = create_network(3);
send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 8000000);
let (our_payment_preimage, _payment_hash) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
+ let (our_payment_preimage_2, _payment_hash_2) = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), 3000000);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
// Broadcast legit commitment tx from C on B's chain
assert_eq!(commitment_tx.len(), 1);
check_spends!(commitment_tx[0], chan_2.3.clone());
nodes[2].node.claim_funds(our_payment_preimage);
- check_added_monitors!(nodes[2], 1);
+ nodes[2].node.claim_funds(our_payment_preimage_2);
+ check_added_monitors!(nodes[2], 2);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
assert!(updates.update_fail_htlcs.is_empty());
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
- let events = nodes[2].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
- let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx), ChannelMonitor : 2 (2 * HTLC-Success tx)
- assert_eq!(node_txn.len(), 3);
- assert_eq!(node_txn[1], commitment_tx[0]);
- assert_eq!(node_txn[0], node_txn[2]);
+ check_closed_broadcast!(nodes[2]);
+ let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx), ChannelMonitor : 4 (2*2 * HTLC-Success tx)
+ assert_eq!(node_txn.len(), 5);
+ assert_eq!(node_txn[0], node_txn[3]);
+ assert_eq!(node_txn[1], node_txn[4]);
+ assert_eq!(node_txn[2], commitment_tx[0]);
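+ // Each HTLC-Success appears twice (the 2*2 above) since the ChannelMonitor
+ // re-broadcasts its claims on the block-connect rescan, hence the pairwise
+ // equality checks above.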
check_spends!(node_txn[0], commitment_tx[0].clone());
+ check_spends!(node_txn[1], commitment_tx[0].clone());
assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
assert_eq!(node_txn[0].lock_time, 0);
+ assert_eq!(node_txn[1].lock_time, 0);
// Verify that B's ChannelManager is able to extract preimage from HTLC Success tx and pass it backward
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: node_txn}, 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
{
let mut added_monitors = nodes[1].chan_monitor.added_monitors.lock().unwrap();
- assert_eq!(added_monitors.len(), 1);
+ assert_eq!(added_monitors.len(), 2);
assert_eq!(added_monitors[0].0.txid, chan_1.3.txid());
+ assert_eq!(added_monitors[1].0.txid, chan_1.3.txid());
added_monitors.clear();
}
assert_eq!(events.len(), 2);
},
_ => panic!("Unexpected event"),
};
- {
- // nodes[1] now broadcasts its own local state as a fallback, suggesting an alternate
- // commitment transaction with a corresponding HTLC-Timeout transaction, as well as a
- // timeout-claim of the output that nodes[2] just claimed via success.
- let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 1 (timeout tx) * 2 (block-rescan)
- assert_eq!(node_txn.len(), 4);
- assert_eq!(node_txn[0], node_txn[3]);
- check_spends!(node_txn[0], commitment_tx[0].clone());
- assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
- assert_ne!(node_txn[0].lock_time, 0);
- assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
- check_spends!(node_txn[1], chan_2.3.clone());
- check_spends!(node_txn[2], node_txn[1].clone());
- assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), 71);
- assert_eq!(node_txn[2].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
- assert!(node_txn[2].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
- assert_ne!(node_txn[2].lock_time, 0);
- node_txn.clear();
- }
+ macro_rules! check_tx_local_broadcast {
+ ($node: expr, $htlc_offered: expr, $commitment_tx: expr, $chan_tx: expr) => { {
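+ // $htlc_offered selects the assertion set: when true, expect
+ // OFFERED_HTLC_SCRIPT_WEIGHT witnesses spending to revokeable p2wsh outputs;
+ // when false, ACCEPTED_HTLC_SCRIPT_WEIGHT witnesses paying to a direct p2wpkh.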
+ // ChannelManager : 3 (commitment tx, 2*HTLC-Timeout tx), ChannelMonitor : 2 (timeout tx) * 2 (block-rescan)
+ let mut node_txn = $node.tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 7);
+ assert_eq!(node_txn[0], node_txn[5]);
+ assert_eq!(node_txn[1], node_txn[6]);
+ check_spends!(node_txn[0], $commitment_tx.clone());
+ check_spends!(node_txn[1], $commitment_tx.clone());
+ assert_ne!(node_txn[0].lock_time, 0);
+ assert_ne!(node_txn[1].lock_time, 0);
+ if $htlc_offered {
+ assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ } else {
+ assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(node_txn[1].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
+ assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
+ assert!(node_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
+ }
+ check_spends!(node_txn[2], $chan_tx.clone());
+ check_spends!(node_txn[3], node_txn[2].clone());
+ check_spends!(node_txn[4], node_txn[2].clone());
+ assert_eq!(node_txn[2].input[0].witness.last().unwrap().len(), 71);
+ assert_eq!(node_txn[3].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(node_txn[4].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert!(node_txn[3].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert!(node_txn[4].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
+ assert_ne!(node_txn[3].lock_time, 0);
+ assert_ne!(node_txn[4].lock_time, 0);
+ node_txn.clear();
+ } }
+ }
+ // nodes[1] now broadcasts its own local state as a fallback, suggesting an alternate
+ // commitment transaction with corresponding HTLC-Timeout transactions, as well as a
+ // timeout-claim of the output that nodes[2] just claimed via success.
+ check_tx_local_broadcast!(nodes[1], false, commitment_tx[0], chan_2.3);
// Broadcast legit commitment tx from A on B's chain
// Broadcast preimage tx by B on offered output from A commitment tx on A's chain
let commitment_tx = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
check_spends!(commitment_tx[0], chan_1.3.clone());
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[1]);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx), ChannelMonitor : 1 (HTLC-Success) * 2 (block-rescan)
assert_eq!(node_txn.len(), 3);
assert_eq!(node_txn[0], node_txn[2]);
check_spends!(node_txn[0], commitment_tx[0].clone());
- assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(node_txn[0].input.len(), 2);
+ assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+ assert_eq!(node_txn[0].input[1].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
assert_eq!(node_txn[0].lock_time, 0);
assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
check_spends!(node_txn[1], chan_1.3.clone());
// Verify that A's ChannelManager is able to extract preimage from preimage tx and generate PaymentSent
nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone(), node_txn[0].clone()] }, 1);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[0]);
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- Event::PaymentSent { payment_preimage } => {
- assert_eq!(payment_preimage, our_payment_preimage);
- },
- _ => panic!("Unexpected event"),
+ assert_eq!(events.len(), 2);
+ let mut first_claimed = false;
+ for event in events {
+ match event {
+ Event::PaymentSent { payment_preimage } => {
+ if payment_preimage == our_payment_preimage {
+ assert!(!first_claimed);
+ first_claimed = true;
+ } else {
+ assert_eq!(payment_preimage, our_payment_preimage_2);
+ }
+ },
+ _ => panic!("Unexpected event"),
+ }
}
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 1 (HTLC-Timeout tx) * 2 (block-rescan)
- assert_eq!(node_txn.len(), 4);
- assert_eq!(node_txn[0], node_txn[3]);
- check_spends!(node_txn[0], commitment_tx[0].clone());
- assert_eq!(node_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
- assert_ne!(node_txn[0].lock_time, 0);
- assert!(node_txn[0].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
- check_spends!(node_txn[1], chan_1.3.clone());
- check_spends!(node_txn[2], node_txn[1].clone());
- assert_eq!(node_txn[1].input[0].witness.clone().last().unwrap().len(), 71);
- assert_eq!(node_txn[2].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
- assert!(node_txn[2].output[0].script_pubkey.is_v0_p2wsh()); // revokeable output
- assert_ne!(node_txn[2].lock_time, 0);
+ check_tx_local_broadcast!(nodes[0], true, commitment_tx[0], chan_1.3);
}
#[test]
let commitment_tx = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
check_spends!(commitment_tx[0], chan_2.3.clone());
nodes[2].node.fail_htlc_backwards(&payment_hash, 0);
- {
- let mut added_monitors = nodes[2].chan_monitor.added_monitors.lock().unwrap();
- assert_eq!(added_monitors.len(), 1);
- added_monitors.clear();
- }
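+ // fail_htlc_backwards only queues the failure; the monitor update happens once
+ // the PendingHTLCsForwardable event is processed, hence 0 monitors here and 1
+ // after expect_pending_htlcs_forwardable! below.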
+ check_added_monitors!(nodes[2], 0);
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 1);
+
let events = nodes[2].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match events[0] {
_ => panic!("Unexpected event"),
};
nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
- let events = nodes[2].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[2]);
let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 1 (commitment tx)
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan_2.3.clone());
}
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![timeout_tx]}, 1);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
+ check_added_monitors!(nodes[1], 0);
+ check_closed_broadcast!(nodes[1]);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
- assert_eq!(events.len(), 2);
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
- _ => panic!("Unexpected event"),
- }
- match events[1] {
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, .. } } => {
assert!(update_add_htlcs.is_empty());
assert!(!update_fail_htlcs.is_empty());
check_spends!(commitment_tx[0], chan_1.3.clone());
nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 200);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[0]);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Timeout tx), ChannelMonitor : 1 (timeout tx) * 2 (block-rescan)
assert_eq!(node_txn.len(), 4);
assert_eq!(node_txn[0], node_txn[3]);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
+ check_added_monitors!(nodes[1], 0);
+ check_closed_broadcast!(nodes[1]);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
check_added_monitors!(nodes[1], 1);
- assert_eq!(events.len(), 2);
+ let events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
- _ => panic!("Unexpected event"),
- }
- match events[1] {
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
assert!(update_add_htlcs.is_empty());
assert_eq!(update_fail_htlcs.len(), 1);
}
}
-fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool) {
+fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use_dust: bool, no_to_remote: bool) {
// Test that if our counterparty broadcasts a revoked commitment transaction we fail all
// pending HTLCs on that channel backwards even if the HTLCs aren't present in our latest
// commitment transaction anymore.
create_announced_chan_between_nodes(&nodes, 0, 1);
let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
- let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
+ let (payment_preimage, _payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], if no_to_remote { 10_000 } else { 3_000_000 });
// Get the will-be-revoked local txn from nodes[2]
let revoked_local_txn = nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn[0].output.len(), if no_to_remote { 1 } else { 2 });
// Revoke the old state
claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage);
- let (_, first_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
- let (_, second_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
- let (_, third_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 3000000);
+ let value = if use_dust {
+ // The dust limit applied to HTLC outputs considers the fee of the HTLC transaction as
+ // well, so HTLCs at exactly the dust limit will not be included in commitment txn.
+ nodes[2].node.channel_state.lock().unwrap().by_id.get(&chan_2.2).unwrap().our_dust_limit_satoshis * 1000
+ } else { 3000000 };
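+ // For example, with a hypothetical our_dust_limit_satoshis of 546 this routes
+ // 546_000 msat; at exactly the dust limit, the added HTLC-transaction fee pushes
+ // the claimable value below it, so no output appears in the commitment tx.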
+
+ let (_, first_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+ let (_, second_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
+ let (_, third_payment_hash) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
assert!(nodes[2].node.fail_htlc_backwards(&first_payment_hash, 0));
+ expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
// Drop the last RAA from 3 -> 2
assert!(nodes[2].node.fail_htlc_backwards(&second_payment_hash, 0));
+ expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
check_added_monitors!(nodes[2], 1);
assert!(nodes[2].node.fail_htlc_backwards(&third_payment_hash, 0));
+ expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
if deliver_bs_raa {
nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_raa).unwrap();
- // One monitor for the new revocation preimage, one as we generate a commitment for
- // nodes[0] to fail first_payment_hash backwards.
- check_added_monitors!(nodes[1], 2);
+ // One monitor for the new revocation preimage, no second one as we won't generate a new
+ // commitment transaction for nodes[0] until process_pending_htlc_forwards().
+ check_added_monitors!(nodes[1], 1);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+ // Deliberately don't process the pending fail-back so they all fail back at once after
+ // block connection, just like in the !deliver_bs_raa case
}
let mut failed_htlcs = HashSet::new();
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
let events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
+ assert_eq!(events.len(), if deliver_bs_raa { 1 } else { 2 });
match events[0] {
Event::PaymentFailed { ref payment_hash, .. } => {
assert_eq!(*payment_hash, fourth_payment_hash);
},
_ => panic!("Unexpected event"),
}
-
if !deliver_bs_raa {
- // If we delivered the RAA already then we already failed first_payment_hash backwards.
- check_added_monitors!(nodes[1], 1);
+ match events[1] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
}
+ nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+ nodes[1].node.process_pending_htlc_forwards();
+ check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), if deliver_bs_raa { 3 } else { 2 });
- match events[if deliver_bs_raa { 2 } else { 0 }] {
+ match events[if deliver_bs_raa { 1 } else { 0 }] {
MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { .. } } => {},
_ => panic!("Unexpected event"),
}
_ => panic!("Unexpected event"),
}
}
- // Due to the way backwards-failing occurs we do the updates in two steps.
- let updates = match events[1] {
+ match events[if deliver_bs_raa { 2 } else { 1 }] {
MessageSendEvent::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, ref update_fulfill_htlcs, ref update_fail_malformed_htlcs, ref commitment_signed, .. } } => {
assert!(update_add_htlcs.is_empty());
- assert_eq!(update_fail_htlcs.len(), 1);
+ assert_eq!(update_fail_htlcs.len(), 3);
assert!(update_fulfill_htlcs.is_empty());
assert!(update_fail_malformed_htlcs.is_empty());
assert_eq!(nodes[0].node.get_our_node_id(), *node_id);
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[0]).unwrap();
- nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed).unwrap();
- check_added_monitors!(nodes[0], 1);
- let (as_revoke_and_ack, as_commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
- nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap();
- check_added_monitors!(nodes[1], 1);
- let bs_second_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
- nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed).unwrap();
- check_added_monitors!(nodes[1], 1);
- let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
- check_added_monitors!(nodes[0], 1);
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[1]).unwrap();
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &update_fail_htlcs[2]).unwrap();
- if !deliver_bs_raa {
- // If we delievered B's RAA we got an unknown preimage error, not something
- // that we should update our routing table for.
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
+ commitment_signed_dance!(nodes[0], nodes[1], commitment_signed, false, true);
+
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ // If we delivered B's RAA we got an unknown-preimage error, not something
+ // that we should update our routing table for.
+ assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 3 });
+ for event in events {
+ match event {
MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
_ => panic!("Unexpected event"),
}
}
let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
+ assert_eq!(events.len(), 3);
match events[0] {
Event::PaymentFailed { ref payment_hash, .. } => {
assert!(failed_htlcs.insert(payment_hash.0));
},
_ => panic!("Unexpected event"),
}
-
- bs_second_update
- },
- _ => panic!("Unexpected event"),
- };
-
- assert!(updates.update_add_htlcs.is_empty());
- assert_eq!(updates.update_fail_htlcs.len(), 2);
- assert!(updates.update_fulfill_htlcs.is_empty());
- assert!(updates.update_fail_malformed_htlcs.is_empty());
- nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
- nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[1]).unwrap();
- commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false, true);
-
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 2);
- for event in events {
- match event {
- MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
- }
-
- let events = nodes[0].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 2);
- match events[0] {
- Event::PaymentFailed { ref payment_hash, .. } => {
- assert!(failed_htlcs.insert(payment_hash.0));
- },
- _ => panic!("Unexpected event"),
- }
- match events[1] {
- Event::PaymentFailed { ref payment_hash, .. } => {
- assert!(failed_htlcs.insert(payment_hash.0));
+ match events[1] {
+ Event::PaymentFailed { ref payment_hash, .. } => {
+ assert!(failed_htlcs.insert(payment_hash.0));
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[2] {
+ Event::PaymentFailed { ref payment_hash, .. } => {
+ assert!(failed_htlcs.insert(payment_hash.0));
+ },
+ _ => panic!("Unexpected event"),
+ }
},
_ => panic!("Unexpected event"),
}
}
#[test]
-fn test_commitment_revoked_fail_backward_exhaustive() {
- do_test_commitment_revoked_fail_backward_exhaustive(false);
- do_test_commitment_revoked_fail_backward_exhaustive(true);
+fn test_commitment_revoked_fail_backward_exhaustive_a() {
+ do_test_commitment_revoked_fail_backward_exhaustive(false, true, false);
+ do_test_commitment_revoked_fail_backward_exhaustive(true, true, false);
+ do_test_commitment_revoked_fail_backward_exhaustive(false, false, false);
+ do_test_commitment_revoked_fail_backward_exhaustive(true, false, false);
+}
+
+#[test]
+fn test_commitment_revoked_fail_backward_exhaustive_b() {
+ do_test_commitment_revoked_fail_backward_exhaustive(false, true, true);
+ do_test_commitment_revoked_fail_backward_exhaustive(true, true, true);
+ do_test_commitment_revoked_fail_backward_exhaustive(false, false, true);
+ do_test_commitment_revoked_fail_backward_exhaustive(true, false, true);
}
#[test]
route_payment(&nodes[0], &[&nodes[1]], 10000000);
nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
- {
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
- assert_eq!(flags & 0b10, 0b10);
- },
- _ => panic!("Unexpected event"),
- }
- }
+ check_closed_broadcast!(nodes[0]);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 2);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
-
- {
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
- assert_eq!(flags & 0b10, 0b10);
- },
- _ => panic!("Unexpected event"),
- }
- }
+ check_closed_broadcast!(nodes[1]);
// Duplicate the block_connected call since this may happen due to other listeners
// registering new transactions
// transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
- let events_3 = nodes[2].node.get_and_clear_pending_msg_events();
- assert_eq!(events_3.len(), 1);
- match events_3[0] {
- MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
- assert_eq!(flags & 0b10, 0b10);
- },
- _ => panic!("Unexpected event"),
- }
-
+ check_closed_broadcast!(nodes[2]);
let tx = {
let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
// Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
- let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
// Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
- assert_eq!(events_4.len(), 1);
- match events_4[0] {
- MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
- assert_eq!(flags & 0b10, 0b10);
- },
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[1]);
// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
{
while !headers.is_empty() {
nodes[0].node.block_disconnected(&headers.pop().unwrap());
}
- {
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
- assert_eq!(flags & 0b10, 0b10);
- },
- _ => panic!("Unexpected event"),
- }
- }
+ check_closed_broadcast!(nodes[0]);
let channel_state = nodes[0].node.channel_state.lock().unwrap();
assert_eq!(channel_state.by_id.len(), 0);
assert_eq!(channel_state.short_to_id.len(), 0);
*nodes[0].chan_monitor.update_ret.lock().unwrap() = Err(ChannelMonitorUpdateErr::PermanentFailure);
nodes[0].node.test_restore_channel_monitor();
check_added_monitors!(nodes[0], 1);
-
- let events_5 = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events_5.len(), 1);
- match events_5[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[0]);
// TODO: Once we hit the chain with the failure transaction we should check that we get a
// PaymentFailed event
// Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA
assert!(nodes[2].node.fail_htlc_backwards(&payment_hash_1, 0));
+ expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
// update_add update.
*nodes[1].chan_monitor.update_ret.lock().unwrap() = Ok(());
nodes[1].node.test_restore_channel_monitor();
- check_added_monitors!(nodes[1], 2);
+ check_added_monitors!(nodes[1], 1);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events();
if test_ignore_second_cs {
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000);
nodes[1].node.force_close_channel(&chan.2);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[1]);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan.3.clone());
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000);
nodes[0].node.force_close_channel(&chan.2);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[0]);
+
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan.3.clone());
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 0);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[1]);
let spend_txn = check_spendable_outputs!(nodes[1], 1);
assert_eq!(spend_txn.len(), 2);
assert_eq!(spend_txn[0], spend_txn[1]);
claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[1]);
+
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
let spend_txn = check_spendable_outputs!(nodes[1], 1);
assert_eq!(spend_txn.len(), 4);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[1]);
+
let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 3);
assert_eq!(node_txn.pop().unwrap(), node_txn[0]);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
// A will generate HTLC-Timeout from revoked commitment tx
nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[0]);
+
let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(revoked_htlc_txn.len(), 3);
assert_eq!(revoked_htlc_txn[0], revoked_htlc_txn[2]);
// B will generate justice tx from A's revoked commitment/HTLC tx
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[1]);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 4);
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
// B will generate HTLC-Success from revoked commitment tx
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[1]);
let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(revoked_htlc_txn.len(), 3);
// A will generate justice tx from B's revoked commitment/HTLC tx
nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone(), revoked_htlc_txn[0].clone()] }, 1);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[0]);
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn.len(), 4);
assert!(updates.update_fail_malformed_htlcs.is_empty());
nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_tx[0].clone()]}, 1);
- let events = nodes[2].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+ check_closed_broadcast!(nodes[2]);
let c_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); // ChannelManager : 2 (commitment tx, HTLC-Success tx), ChannelMonitor : 1 (HTLC-Success tx)
assert_eq!(c_txn.len(), 3);
assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
assert_eq!(b_txn[2].lock_time, 0); // Success tx
- let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
- match msg_events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexpected event"),
- }
+
+ check_closed_broadcast!(nodes[1]);
}
#[test]
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
+ check_closed_broadcast!(nodes[1]);
+
let htlc_timeout_tx;
{ // Extract one of the two HTLC-Timeout transactions
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
htlc_timeout_tx = node_txn[1].clone();
}
- let events = nodes[1].node.get_and_clear_pending_msg_events();
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexepected event"),
- }
-
nodes[2].node.claim_funds(our_payment_preimage);
nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![commitment_txn[0].clone()] }, 1);
check_added_monitors!(nodes[2], 2);
check_spends!(htlc_success_txn[1], commitment_txn[0].clone());
nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![htlc_timeout_tx] }, 200);
+ expect_pending_htlcs_forwardable!(nodes[1]);
let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(htlc_updates.update_add_htlcs.is_empty());
assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
check_spends!(spend_txn[1], node_txn[2].clone());
}
+fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
+ // Test that we fail backwards the full set of HTLCs we need to when the remote broadcasts an
+ // unrevoked commitment transaction.
+ // This includes HTLCs which were below the dust threshold as well as HTLCs which were awaiting
+ // a remote RAA before they could be failed backwards (and combinations thereof).
+ // We also test duplicate-hash HTLCs by adding two nodes on each side of the target nodes which
+ // use the same payment hashes.
+ // Thus, we use a six-node network:
+ //
+ // A \ / E
+ // - C - D -
+ // B / \ F
+ // And test where C fails back to A/B when D announces its latest commitment transaction
+ let nodes = create_network(6);
+
+ create_announced_chan_between_nodes(&nodes, 0, 2);
+ create_announced_chan_between_nodes(&nodes, 1, 2);
+ let chan = create_announced_chan_between_nodes(&nodes, 2, 3);
+ create_announced_chan_between_nodes(&nodes, 3, 4);
+ create_announced_chan_between_nodes(&nodes, 3, 5);
+
+ // Rebalance and check output sanity...
+ send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
+ send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
+ assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn[0].output.len(), 2);
+
+ let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().our_dust_limit_satoshis;
+ // 0th HTLC:
+ let (_, payment_hash_1) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+ // 1st HTLC:
+ let (_, payment_hash_2) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+ let route = nodes[1].router.get_route(&nodes[5].node.get_our_node_id(), None, &Vec::new(), ds_dust_limit*1000, TEST_FINAL_CLTV).unwrap();
+ // 2nd HTLC:
+ send_along_route_with_hash(&nodes[1], route.clone(), &[&nodes[2], &nodes[3], &nodes[5]], ds_dust_limit*1000, payment_hash_1); // not added < dust limit + HTLC tx fee
+ // 3rd HTLC:
+ send_along_route_with_hash(&nodes[1], route, &[&nodes[2], &nodes[3], &nodes[5]], ds_dust_limit*1000, payment_hash_2); // not added < dust limit + HTLC tx fee
+ // 4th HTLC:
+ let (_, payment_hash_3) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+ // 5th HTLC:
+ let (_, payment_hash_4) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+ let route = nodes[1].router.get_route(&nodes[5].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ // 6th HTLC:
+ send_along_route_with_hash(&nodes[1], route.clone(), &[&nodes[2], &nodes[3], &nodes[5]], 1000000, payment_hash_3);
+ // 7th HTLC:
+ send_along_route_with_hash(&nodes[1], route, &[&nodes[2], &nodes[3], &nodes[5]], 1000000, payment_hash_4);
+
+ // 8th HTLC:
+ let (_, payment_hash_5) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 1000000);
+ // 9th HTLC:
+ let route = nodes[1].router.get_route(&nodes[5].node.get_our_node_id(), None, &Vec::new(), ds_dust_limit*1000, TEST_FINAL_CLTV).unwrap();
+ send_along_route_with_hash(&nodes[1], route, &[&nodes[2], &nodes[3], &nodes[5]], ds_dust_limit*1000, payment_hash_5); // not added < dust limit + HTLC tx fee
+
+ // 10th HTLC:
+ let (_, payment_hash_6) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
+ // 11th HTLC:
+ let route = nodes[1].router.get_route(&nodes[5].node.get_our_node_id(), None, &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ send_along_route_with_hash(&nodes[1], route, &[&nodes[2], &nodes[3], &nodes[5]], 1000000, payment_hash_6);
+
+ // Double-check that six of the new HTLCs were added
+ // We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
+ // with the to_local and to_remote outputs that makes 8 outputs total, while the six dust
+ // HTLCs are not included).
+ assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.len(), 1);
+ assert_eq!(nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn[0].output.len(), 8);
+
+ // Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
+ // Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
+ assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_1, ds_dust_limit*1000));
+ assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_3, 1000000));
+ assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_5, 1000000));
+ assert!(nodes[4].node.fail_htlc_backwards(&payment_hash_6, ds_dust_limit*1000));
+ check_added_monitors!(nodes[4], 0);
+ expect_pending_htlcs_forwardable!(nodes[4]);
+ check_added_monitors!(nodes[4], 1);
+
+ let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
+ nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[0]).unwrap();
+ nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[1]).unwrap();
+ nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[2]).unwrap();
+ nodes[3].node.handle_update_fail_htlc(&nodes[4].node.get_our_node_id(), &four_removes.update_fail_htlcs[3]).unwrap();
+ commitment_signed_dance!(nodes[3], nodes[4], four_removes.commitment_signed, false);
+
+ // Fail 3rd below-dust and 7th above-dust HTLCs
+ assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_2, ds_dust_limit*1000));
+ assert!(nodes[5].node.fail_htlc_backwards(&payment_hash_4, 1000000));
+ check_added_monitors!(nodes[5], 0);
+ expect_pending_htlcs_forwardable!(nodes[5]);
+ check_added_monitors!(nodes[5], 1);
+
+ let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
+ nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[0]).unwrap();
+ nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]).unwrap();
+ commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
+
+ let ds_prev_commitment_tx = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+
+ expect_pending_htlcs_forwardable!(nodes[3]);
+ check_added_monitors!(nodes[3], 1);
+ let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]).unwrap();
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[1]).unwrap();
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[2]).unwrap();
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[3]).unwrap();
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[4]).unwrap();
+ nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[5]).unwrap();
+ if deliver_last_raa {
+ commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false);
+ } else {
+ let _cs_last_raa = commitment_signed_dance!(nodes[2], nodes[3], six_removes.commitment_signed, false, true, false, true);
+ }
+
+ // D's latest commitment transaction now contains 1st + 2nd + 9th HTLCs (implicitly, they're
+ // below the dust limit) and the 5th + 6th + 11th HTLCs. It has failed back the 0th, 3rd, 4th,
+ // 7th, 8th, and 10th, but as we haven't yet delivered the final RAA to C, the fails haven't
+ // propagated back to A/B yet (and D has two unrevoked commitment transactions).
+ //
+ // We now broadcast the latest commitment transaction, which *should* result in failures for
+ // the 0th, 1st, 2nd, 3rd, 4th, 7th, 8th, 9th, and 10th HTLCs, ie all the below-dust HTLCs and
+ // the non-broadcast above-dust HTLCs.
+ //
+ // Alternatively, we may broadcast the previous commitment transaction, which should only
+ // result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
+ let ds_last_commitment_tx = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().last_local_commitment_txn.clone();
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ if announce_latest {
+ nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&ds_last_commitment_tx[0]], &[1; 1]);
+ } else {
+ nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&ds_prev_commitment_tx[0]], &[1; 1]);
+ }
+ check_closed_broadcast!(nodes[2]);
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ check_added_monitors!(nodes[2], 2);
+
+ let cs_msgs = nodes[2].node.get_and_clear_pending_msg_events();
+ assert_eq!(cs_msgs.len(), 2);
+ let mut a_done = false;
+ for msg in cs_msgs {
+ match msg {
+ MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
+ // Both under-dust HTLCs and the one above-dust HTLC that we had already failed
+ // should be failed-backwards here.
+ let target = if *node_id == nodes[0].node.get_our_node_id() {
+ // If announce_latest, expect 0th, 1st, 4th, 8th, 10th HTLCs, else only 0th, 1st, 10th below-dust HTLCs
+ for htlc in &updates.update_fail_htlcs {
+ assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 6 || if announce_latest { htlc.htlc_id == 3 || htlc.htlc_id == 5 } else { false });
+ }
+ assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 5 } else { 3 });
+ assert!(!a_done);
+ a_done = true;
+ &nodes[0]
+ } else {
+ // If announce_latest, expect 2nd, 3rd, 7th, 9th HTLCs, else only 2nd, 3rd, 9th below-dust HTLCs
+ for htlc in &updates.update_fail_htlcs {
+ assert!(htlc.htlc_id == 1 || htlc.htlc_id == 2 || htlc.htlc_id == 5 || if announce_latest { htlc.htlc_id == 4 } else { false });
+ }
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ assert_eq!(updates.update_fail_htlcs.len(), if announce_latest { 4 } else { 3 });
+ &nodes[1]
+ };
+ target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]).unwrap();
+ target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[1]).unwrap();
+ target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[2]).unwrap();
+ if announce_latest {
+ target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[3]).unwrap();
+ if *node_id == nodes[0].node.get_our_node_id() {
+ target.node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[4]).unwrap();
+ }
+ }
+ commitment_signed_dance!(target, nodes[2], updates.commitment_signed, false, true);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+
+ let as_events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(as_events.len(), if announce_latest { 5 } else { 3 });
+ let mut as_failds = HashSet::new();
+ for event in as_events.iter() {
+ if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, .. } = event {
+ assert!(as_failds.insert(*payment_hash));
+ if *payment_hash != payment_hash_2 {
+ assert_eq!(*rejected_by_dest, deliver_last_raa);
+ } else {
+ assert!(!rejected_by_dest);
+ }
+ } else { panic!("Unexpected event"); }
+ }
+ assert!(as_failds.contains(&payment_hash_1));
+ assert!(as_failds.contains(&payment_hash_2));
+ if announce_latest {
+ assert!(as_failds.contains(&payment_hash_3));
+ assert!(as_failds.contains(&payment_hash_5));
+ }
+ assert!(as_failds.contains(&payment_hash_6));
+
+ let bs_events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(bs_events.len(), if announce_latest { 4 } else { 3 });
+ let mut bs_failds = HashSet::new();
+ for event in bs_events.iter() {
+ if let &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, .. } = event {
+ assert!(bs_failds.insert(*payment_hash));
+ if *payment_hash != payment_hash_1 && *payment_hash != payment_hash_5 {
+ assert_eq!(*rejected_by_dest, deliver_last_raa);
+ } else {
+ assert!(!rejected_by_dest);
+ }
+ } else { panic!("Unexpected event"); }
+ }
+ assert!(bs_failds.contains(&payment_hash_1));
+ assert!(bs_failds.contains(&payment_hash_2));
+ if announce_latest {
+ assert!(bs_failds.contains(&payment_hash_4));
+ }
+ assert!(bs_failds.contains(&payment_hash_5));
+
+	// For each HTLC which was not failed back by the normal process (i.e. in the
+	// deliver_last_raa case), we should get a PaymentFailureNetworkUpdate. A should have gotten
+	// 4 HTLCs which were failed back due to unknown-preimage-etc, and B should have gotten 2.
+	// Thus, in the announce_latest && deliver_last_raa case, we should see 5-4=1 and 4-2=2
+	// PaymentFailureNetworkUpdates respectively.
+ let as_msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(as_msg_events.len(), if deliver_last_raa { 1 } else if !announce_latest { 3 } else { 5 });
+ let bs_msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(bs_msg_events.len(), if deliver_last_raa { 2 } else if !announce_latest { 3 } else { 4 });
+ for event in as_msg_events.iter().chain(bs_msg_events.iter()) {
+ match event {
+ &MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+ _ => panic!("Unexpected event"),
+ }
+ }
+}
+
+#[test]
+fn test_fail_backwards_latest_remote_announce_a() {
+ do_test_fail_backwards_unrevoked_remote_announce(false, true);
+}
+
+#[test]
+fn test_fail_backwards_latest_remote_announce_b() {
+ do_test_fail_backwards_unrevoked_remote_announce(true, true);
+}
+
+#[test]
+fn test_fail_backwards_previous_remote_announce() {
+ do_test_fail_backwards_unrevoked_remote_announce(false, false);
+	// Note that true, false doesn't make sense as it implies we announce a revoked state, which
+	// is tested for in test_commitment_revoked_fail_backward_exhaustive()
+}
+
#[test]
fn test_dynamic_spendable_outputs_local_htlc_timeout_tx() {
let nodes = create_network(2);
// Timeout HTLC on A's chain and so it can generate a HTLC-Timeout tx
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![local_txn[0].clone()] }, 200);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- match events[0] {
- MessageSendEvent::BroadcastChannelUpdate { .. } => {},
- _ => panic!("Unexepected event"),
- }
+ check_closed_broadcast!(nodes[0]);
+
let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
assert_eq!(node_txn[0].input.len(), 1);
assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
check_spends!(spend_txn[0], closing_tx);
}
+fn do_htlc_claim_local_commitment_only(use_dust: bool) {
+ let nodes = create_network(2);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ let (our_payment_preimage, _) = route_payment(&nodes[0], &[&nodes[1]], if use_dust { 50000 } else { 3000000 });
+
+ // Claim the payment, but don't deliver A's commitment_signed, resulting in the HTLC only being
+ // present in B's local commitment transaction, but none of A's commitment transactions.
+ assert!(nodes[1].node.claim_funds(our_payment_preimage));
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]).unwrap();
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentSent { payment_preimage } => {
+ assert_eq!(payment_preimage, our_payment_preimage);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
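+	// Connect blocks on B until the HTLC is within CLTV_CLAIM_BUFFER of expiry, at which point
+	// B (which holds the preimage) must go on-chain to claim, broadcasting its local commitment
+	// (plus an HTLC-Success in the non-dust case).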
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ for i in 1..TEST_FINAL_CLTV - CLTV_CLAIM_BUFFER + CHAN_CONFIRM_DEPTH + 1 {
+ nodes[1].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
+ header.prev_blockhash = header.bitcoin_hash();
+ }
+ test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
+ check_closed_broadcast!(nodes[1]);
+}
+
+fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
+ let mut nodes = create_network(2);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &Vec::new(), if use_dust { 50000 } else { 3000000 }, TEST_FINAL_CLTV).unwrap();
+ let (_, payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let _as_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
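+	// A's update_add is deliberately never delivered to B; we only drain it from A's pending
+	// message events here.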
+
+	// As far as A is concerned, the HTLC is now present only in the latest remote commitment
+ // transaction, however it is not in A's latest local commitment, so we can just broadcast that
+ // to "time out" the HTLC.
+
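+	// Connect blocks on A until the HTLC's timeout (plus the fail-timeout buffer) has passed,
+	// forcing A to broadcast its HTLC-free local commitment to close the channel.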
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ for i in 1..TEST_FINAL_CLTV + HTLC_FAIL_TIMEOUT_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
+ nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
+ header.prev_blockhash = header.bitcoin_hash();
+ }
+ test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
+ check_closed_broadcast!(nodes[0]);
+}
+
+fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
+	let nodes = create_network(2);
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ // Fail the payment, but don't deliver A's final RAA, resulting in the HTLC only being present
+ // in B's previous (unrevoked) commitment transaction, but none of A's commitment transactions.
+ // Also optionally test that we *don't* fail the channel in case the commitment transaction was
+ // actually revoked.
+ let htlc_value = if use_dust { 50000 } else { 3000000 };
+ let (_, our_payment_hash) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
+ assert!(nodes[1].node.fail_htlc_backwards(&our_payment_hash, htlc_value));
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+
+ let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]).unwrap();
+ nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let as_updates = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_updates.0).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.1).unwrap();
+ check_added_monitors!(nodes[1], 1);
+ let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+
+ if check_revoke_no_close {
+ nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ }
+
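+	// Connect blocks on A past the HTLC's timeout. If B's final RAA was delivered, the fail is
+	// fully locked in and no close should occur; otherwise A must still close on-chain to time
+	// the HTLC out.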
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ for i in 1..TEST_FINAL_CLTV + HTLC_FAIL_TIMEOUT_BLOCKS + CHAN_CONFIRM_DEPTH + 1 {
+ nodes[0].chain_monitor.block_connected_checked(&header, i, &Vec::new(), &Vec::new());
+ header.prev_blockhash = header.bitcoin_hash();
+ }
+ if !check_revoke_no_close {
+ test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
+ check_closed_broadcast!(nodes[0]);
+ } else {
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentFailed { payment_hash, rejected_by_dest, .. } => {
+ assert_eq!(payment_hash, our_payment_hash);
+ assert!(rejected_by_dest);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+}
+
+// Test that we close channels on-chain when broadcastable HTLCs reach their timeout window.
+// There are only a few cases to test here:
+// * it's not really normative behavior, but we test that below-dust HTLCs "included" in
+//   broadcastable commitment transactions result in channel closure,
+// * the HTLC is included in an unrevoked-but-previous remote commitment transaction,
+// * the HTLC is included in the latest remote or local commitment transactions.
+// We test each of the three possible commitment transactions individually and use both dust and
+// non-dust HTLCs.
+// Note that we don't bother testing both outbound and inbound HTLC failures for each case, and we
+// assume they are handled the same across all six cases, as both outbound and inbound failures are
+// tested for at least one of the cases in other tests.
+#[test]
+fn htlc_claim_single_commitment_only_a() {
+ do_htlc_claim_local_commitment_only(true);
+ do_htlc_claim_local_commitment_only(false);
+
+ do_htlc_claim_current_remote_commitment_only(true);
+ do_htlc_claim_current_remote_commitment_only(false);
+}
+
+#[test]
+fn htlc_claim_single_commitment_only_b() {
+ do_htlc_claim_previous_remote_commitment_only(true, false);
+ do_htlc_claim_previous_remote_commitment_only(false, false);
+ do_htlc_claim_previous_remote_commitment_only(true, true);
+ do_htlc_claim_previous_remote_commitment_only(false, true);
+}
+
fn run_onion_failure_test<F1,F2>(_name: &str, test_case: u8, nodes: &Vec<Node>, route: &Route, payment_hash: &PaymentHash, callback_msg: F1, callback_node: F2, expected_retryable: bool, expected_error_code: Option<u16>, expected_channel_update: Option<HTLCFailChannelUpdate>)
where F1: for <'a> FnMut(&'a mut msgs::UpdateAddHTLC),
F2: FnMut(),
expect_htlc_forward!(&nodes[2]);
expect_event!(&nodes[2], Event::PaymentReceived);
callback_node();
+ expect_pending_htlcs_forwardable!(nodes[2]);
}
let update_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
// 2 => 1
nodes[1].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_msg).unwrap();
- commitment_signed_dance!(nodes[1], nodes[2], update_2_1.commitment_signed, true, true);
+ commitment_signed_dance!(nodes[1], nodes[2], update_2_1.commitment_signed, true);
// backward fail on 1
let update_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
msg.onion_routing_packet = onion_packet;
}, ||{}, true, Some(21), None);
}
+
+#[test]
+fn test_update_add_htlc_bolt2_sender() {
+ use util::rng;
+ use std::sync::atomic::Ordering;
+ use super::channelmanager::HTLCSource;
+ use super::channel::ChannelError;
+
+ let secp_ctx = Secp256k1::new();
+
+ // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
+
+ // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
+	//TODO: I don't believe this is explicitly enforced when sending an HTLC, but as the fee aspect of the BOLT specs is in flux we leave this as a TODO.
+
+ // BOLT2 Requirement: MUST offer amount_msat greater than 0.
+ // BOLT2 Requirement: MUST NOT offer amount_msat below the receiving node's htlc_minimum_msat (same validation check catches both of these)
+ let mut nodes = create_network(2);
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+
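+	// Build the session key and onion packet by hand so we can call Channel::send_htlc directly
+	// and exercise its checks without going through send_payment.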
+ let session_priv = SecretKey::from_slice(&secp_ctx, &{
+ let mut session_key = [0; 32];
+ rng::fill_bytes(&mut session_key);
+ session_key
+ }).expect("RNG is bad!");
+
+ let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
+ let (onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+
+ let err = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().send_htlc(0, our_payment_hash, TEST_FINAL_CLTV, HTLCSource::OutboundRoute {
+ route: route.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: 0,
+ }, onion_packet);
+
+ if let Err(ChannelError::Ignore(msg)) = err {
+ assert_eq!(msg, "Cannot send less than their minimum HTLC value");
+ } else {
+		panic!("Unexpected error");
+ }
+
+ //BOLT 2 Requirement: MUST set cltv_expiry less than 500000000.
+	//TODO: This is not currently explicitly checked when sending an HTLC and exists as a TODO in the channel::send_htlc(...) function.
+	//It is, however, enforced when constructing a route.
+
+ // BOLT 2 Requirement: if result would be offering more than the remote's max_accepted_htlcs HTLCs, in the remote commitment transaction: MUST NOT add an HTLC.
+ let mut nodes = create_network(2);
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 0);
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+
+ let session_priv = SecretKey::from_slice(&secp_ctx, &{
+ let mut session_key = [0; 32];
+ rng::fill_bytes(&mut session_key);
+ session_key
+ }).expect("RNG is bad!");
+
+ let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
+ let (onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+
+ let max_accepted_htlcs = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().their_max_accepted_htlcs;
+
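+	// Fill the channel up to the peer's max_accepted_htlcs limit; the next offer below should
+	// then be refused.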
+ for _i in 0..max_accepted_htlcs {
+ let _ = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().send_htlc(10000, our_payment_hash, TEST_FINAL_CLTV, HTLCSource::OutboundRoute {
+ route: route.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: 0,
+ }, onion_packet.clone());
+ }
+
+ let err = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().send_htlc(10000, our_payment_hash, TEST_FINAL_CLTV, HTLCSource::OutboundRoute {
+ route: route.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: 0,
+ }, onion_packet);
+
+ if let Err(ChannelError::Ignore(msg)) = err {
+ assert_eq!(msg, "Cannot push more than their max accepted HTLCs");
+ } else {
+		panic!("Unexpected error");
+ }
+
+ // BOLT 2 Requirement: if the sum of total offered HTLCs would exceed the remote's max_htlc_value_in_flight_msat: MUST NOT add an HTLC.
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 0);
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+
+ let session_priv = SecretKey::from_slice(&secp_ctx, &{
+ let mut session_key = [0; 32];
+ rng::fill_bytes(&mut session_key);
+ session_key
+ }).expect("RNG is bad!");
+
+ let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
+ let (onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+
+ let err = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().send_htlc(10000001, our_payment_hash, TEST_FINAL_CLTV, HTLCSource::OutboundRoute {
+ route: route.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: 0,
+ }, onion_packet);
+
+ if let Err(ChannelError::Ignore(msg)) = err {
+ assert_eq!(msg, "Cannot send value that would put us over our max HTLC value in flight");
+ } else {
+		panic!("Unexpected error");
+ }
+
+ // BOLT 2 Requirement: MUST increase the value of id by 1 for each successive offer.
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 0);
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+
+ let session_priv = SecretKey::from_slice(&secp_ctx, &{
+ let mut session_key = [0; 32];
+ rng::fill_bytes(&mut session_key);
+ session_key
+ }).expect("RNG is bad!");
+
+ let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
+ let (onion_payloads, _htlc_msat, _htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+
+ for expected_id in 0..2 {
+ let res = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&chan.2).unwrap().send_htlc(100000, our_payment_hash, TEST_FINAL_CLTV, HTLCSource::OutboundRoute {
+ route: route.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: 0,
+ }, onion_packet.clone());
+
+ if let Ok(Some(msg)) = res {
+ assert_eq!(msg.htlc_id, expected_id);
+ } else {
+			panic!("Unexpected result");
+ }
+ }
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() {
+ use super::msgs::HandleError;
+
+ //BOLT2 Requirement: receiving an amount_msat equal to 0, OR less than its own htlc_minimum_msat -> SHOULD fail the channel.
+ let mut nodes = create_network(2);
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
+ let htlc_minimum_msat: u64;
+ {
+ let chan_lock = nodes[0].node.channel_state.lock().unwrap();
+ let channel = chan_lock.by_id.get(&chan.2).unwrap();
+ htlc_minimum_msat = channel.get_our_htlc_minimum_msat();
+ }
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], htlc_minimum_msat+1, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
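+	// Tamper with the forwarded update so it claims less than B's htlc_minimum_msat; B should
+	// treat this as a protocol violation and fail the channel.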
+ updates.update_add_htlcs[0].amount_msat = htlc_minimum_msat-1;
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+ if let Err(HandleError{err, action: _}) = err {
+ assert_eq!(err, "Remote side tried to send less than our minimum HTLC value");
+ } else {
+		panic!("Unexpected error");
+ }
+
+ //Confirm the channel was closed
+ {
+		assert!(nodes[1].node.channel_state.lock().unwrap().by_id.is_empty());
+ }
+ //Clear unhandled msg events.
+ let _ = nodes[1].node.get_and_clear_pending_msg_events();
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_receiver_sender_can_afford_amount_sent() {
+ use super::msgs::HandleError;
+
+ //BOLT2 Requirement: receiving an amount_msat that the sending node cannot afford at the current feerate_per_kw (while maintaining its channel reserve): SHOULD fail the channel
+ let mut nodes = create_network(2);
+ let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 3999999, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+
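+	// Inflate the claimed amount beyond what A can afford while maintaining its channel
+	// reserve; B should fail the channel.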
+ updates.update_add_htlcs[0].amount_msat = 4000001;
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+ if let Err(HandleError{err, action: _}) = err {
+ assert_eq!(err, "Remote HTLC add would put them over their reserve value");
+ } else {
+		panic!("Unexpected error");
+ }
+
+ //Confirm the channel was closed
+ {
+		assert!(nodes[1].node.channel_state.lock().unwrap().by_id.is_empty());
+ }
+ //Clear unhandled msg events.
+ let _ = nodes[1].node.get_and_clear_pending_msg_events();
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() {
+	use util::rng;
+	use std::sync::atomic::Ordering;
+	use super::msgs::HandleError;
+
+	let secp_ctx = Secp256k1::new();
+
+ //BOLT 2 Requirement: if a sending node adds more than its max_accepted_htlcs HTLCs to its local commitment transaction: SHOULD fail the channel
+ //BOLT 2 Requirement: MUST allow multiple HTLCs with the same payment_hash.
+ let mut nodes = create_network(2);
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 3999999, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+
+ let session_priv = SecretKey::from_slice(&secp_ctx, &{
+ let mut session_key = [0; 32];
+ rng::fill_bytes(&mut session_key);
+ session_key
+ }).expect("RNG is bad!");
+
+ let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let onion_keys = onion_utils::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
+ let (onion_payloads, _htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(&route, cur_height).unwrap();
+ let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+
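+	// Hand-construct the update_add_htlc so we can replay it with increasing htlc_ids and push
+	// B one past its max accepted HTLC count.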
+ let mut msg = msgs::UpdateAddHTLC {
+ channel_id: chan.2,
+ htlc_id: 0,
+ amount_msat: 1000,
+ payment_hash: our_payment_hash,
+ cltv_expiry: htlc_cltv,
+ onion_routing_packet: onion_packet.clone(),
+ };
+
+ for i in 0..super::channel::OUR_MAX_HTLCS {
+ msg.htlc_id = i as u64;
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).unwrap();
+ }
+	// The loop above used ids 0 through OUR_MAX_HTLCS-1, so this is the next in-sequence id
+	msg.htlc_id = (super::channel::OUR_MAX_HTLCS) as u64;
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg);
+
+ if let Err(HandleError{err, action: _}) = err {
+ assert_eq!(err, "Remote tried to push more than our max accepted HTLCs");
+ } else {
+		panic!("Unexpected error");
+ }
+
+ //Confirm the channel was closed
+ {
+		assert!(nodes[1].node.channel_state.lock().unwrap().by_id.is_empty());
+ }
+ //Clear unhandled msg events.
+ let _ = nodes[1].node.get_and_clear_pending_msg_events();
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() {
+ use super::msgs::HandleError;
+
+ //OR adds more than its max_htlc_value_in_flight_msat worth of offered HTLCs to its local commitment transaction: SHOULD fail the channel
+ let mut nodes = create_network(2);
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000);
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 1000000, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
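+	// Bump the amount just past the in-flight limit B enforces; B should fail the channel.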
+ updates.update_add_htlcs[0].amount_msat = nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().their_max_htlc_value_in_flight_msat + 1;
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+ if let Err(HandleError{err, action: _}) = err {
+ assert_eq!(err,"Remote HTLC add would put them over their max HTLC value in flight");
+ } else {
+		panic!("Unexpected error");
+ }
+
+ //Confirm the channel was closed
+ {
+		assert!(nodes[1].node.channel_state.lock().unwrap().by_id.is_empty());
+ }
+ //Clear unhandled msg events.
+ let _ = nodes[1].node.get_and_clear_pending_msg_events();
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_receiver_check_cltv_expiry() {
+ use super::msgs::HandleError;
+
+ //BOLT2 Requirement: if sending node sets cltv_expiry to greater or equal to 500000000: SHOULD fail the channel.
+ let mut nodes = create_network(2);
+ let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 3999999, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
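+	// cltv_expiry values of 500000000 or more are interpreted as UNIX timestamps rather than
+	// block heights (matching Bitcoin's locktime rules), so B should fail the channel.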
+ updates.update_add_htlcs[0].cltv_expiry = 500000000;
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+
+ if let Err(HandleError{err, action: _}) = err {
+ assert_eq!(err,"Remote provided CLTV expiry in seconds instead of block height");
+ } else {
+		panic!("Unexpected error");
+ }
+
+ //Confirm the channel was closed
+ {
+		assert!(nodes[1].node.channel_state.lock().unwrap().by_id.is_empty());
+ }
+ //Clear unhandled msg events.
+ let _ = nodes[1].node.get_and_clear_pending_msg_events();
+}
+
+#[test]
+fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() {
+ use super::msgs::HandleError;
+
+ //BOLT 2 requirement: if the sender did not previously acknowledge the commitment of that HTLC: MUST ignore a repeated id value after a reconnection.
+ let mut nodes = create_network(2);
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000);
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), None, &[], 3999999, TEST_FINAL_CLTV).unwrap();
+ let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ let _ = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+ assert_eq!(nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().next_remote_htlc_id, 1);
+
+ //Disconnect and Reconnect
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id());
+ let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
+ assert_eq!(reestablish_1.len(), 1);
+ nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id());
+ let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
+ assert_eq!(reestablish_2.len(), 1);
+ nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &reestablish_2[0]).unwrap();
+ let _ = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
+ nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &reestablish_1[0]).unwrap();
+ let _ = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
+ let _ = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+ //Confirm the HTLC was ignored
+ assert_eq!(nodes[1].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().next_remote_htlc_id, 1);
+
+ //Clear unhandled msg events
+ let _ = nodes[1].node.get_and_clear_pending_msg_events();
+}
\ No newline at end of file