+
+ macro_rules! expect_payment_received {
+ ($node: expr, $expected_payment_hash: expr, $expected_recv_value: expr) => {
+ let events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!($expected_payment_hash, *payment_hash);
+ assert_eq!($expected_recv_value, amt);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+ };
+
+ let feemsat = 239; // fee charged by the intermediate hop on this route (checked against the route below)
+ let total_fee_msat = (nodes.len() - 2) as u64 * 239;
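+ // with three nodes there is a single intermediate hop, so the total routing fee is one feemsat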
+
+ let recv_value_0 = stat01.their_max_htlc_value_in_flight_msat - total_fee_msat;
+
+ // attempt to send amt_msat > their_max_htlc_value_in_flight_msat
+ {
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1);
+ assert!(route.hops.iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
+ let err = nodes[0].node.send_payment(route, our_payment_hash).err().unwrap();
+ match err {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our max HTLC value in flight"),
+ _ => panic!("Unknown error variants"),
+ }
+ }
+
+ let mut htlc_id = 0;
+ // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete
+ // nodes[0]'s wealth
+ loop {
+ let amt_msat = recv_value_0 + total_fee_msat;
+ if stat01.value_to_self_msat - amt_msat < stat01.channel_reserve_msat {
+ break;
+ }
+ send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_0);
+ htlc_id += 1;
+
+ let (stat01_, stat11_, stat12_, stat22_) = (
+ get_channel_value_stat!(nodes[0], chan_1.2),
+ get_channel_value_stat!(nodes[1], chan_1.2),
+ get_channel_value_stat!(nodes[1], chan_2.2),
+ get_channel_value_stat!(nodes[2], chan_2.2),
+ );
+
+ assert_eq!(stat01_.value_to_self_msat, stat01.value_to_self_msat - amt_msat);
+ assert_eq!(stat11_.value_to_self_msat, stat11.value_to_self_msat + amt_msat);
+ assert_eq!(stat12_.value_to_self_msat, stat12.value_to_self_msat - (amt_msat - feemsat));
+ assert_eq!(stat22_.value_to_self_msat, stat22.value_to_self_msat + (amt_msat - feemsat));
+ stat01 = stat01_; stat11 = stat11_; stat12 = stat12_; stat22 = stat22_;
+ }
+
+ {
+ let recv_value = stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat;
+ // attempt to trigger a channel_reserve violation
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value + 1);
+ let err = nodes[0].node.send_payment(route.clone(), our_payment_hash).err().unwrap();
+ match err {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
+ _ => panic!("Unknown error variants"),
+ }
+ }
+
+ // adding pending output
+ let recv_value_1 = (stat01.value_to_self_msat - stat01.channel_reserve_msat - total_fee_msat)/2;
+ let amt_msat_1 = recv_value_1 + total_fee_msat;
+
+ let (route_1, our_payment_hash_1, our_payment_preimage_1) = get_route_and_payment_hash!(recv_value_1);
+ let payment_event_1 = {
+ nodes[0].node.send_payment(route_1, our_payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_1.msgs[0]).unwrap();
+
+ // channel reserve test with htlc pending output > 0
+ let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat;
+ {
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
+ match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
+ _ => panic!("Unknown error variants"),
+ }
+ }
+
+ {
+ // test the channel_reserve check on the nodes[1] side
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1);
+
+ // Need to manually create the update_add_htlc message to bypass the channel reserve check in send_htlc()
+ let secp_ctx = Secp256k1::new();
+ let session_priv = SecretKey::from_slice(&secp_ctx, &{
+ let mut session_key = [0; 32];
+ rng::fill_bytes(&mut session_key);
+ session_key
+ }).expect("RNG is bad!");
+
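+ // Construct the onion by hand: derive the per-hop keys from the random session key, build the
+ // per-hop payloads, then assemble the packet for our payment hash.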
+ let cur_height = nodes[0].node.latest_block_height.load(Ordering::Acquire) as u32 + 1;
+ let onion_keys = ChannelManager::construct_onion_keys(&secp_ctx, &route, &session_priv).unwrap();
+ let (onion_payloads, htlc_msat, htlc_cltv) = ChannelManager::build_onion_payloads(&route, cur_height).unwrap();
+ let onion_packet = ChannelManager::construct_onion_packet(onion_payloads, onion_keys, &our_payment_hash);
+ let msg = msgs::UpdateAddHTLC {
+ channel_id: chan_1.2,
+ htlc_id,
+ amount_msat: htlc_msat,
+ payment_hash: our_payment_hash,
+ cltv_expiry: htlc_cltv,
+ onion_routing_packet: onion_packet,
+ };
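+ 
+ // Delivering this hand-built HTLC skips nodes[0]'s local checks, so nodes[1] must enforce the
+ // reserve itself and reject the add.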
+
+ let err = nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg).err().unwrap();
+ match err {
+ HandleError{err, .. } => assert_eq!(err, "Remote HTLC add would put them over their reserve value"),
+ }
+ }
+
+ // split the rest to test holding cell
+ let recv_value_21 = recv_value_2/2;
+ let recv_value_22 = recv_value_2 - recv_value_21 - total_fee_msat;
+ {
+ let stat = get_channel_value_stat!(nodes[0], chan_1.2);
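+ // sanity check: once recv_value_21 and recv_value_22 (plus their fees) are in flight on top of
+ // what is already pending, nodes[0]'s balance will sit exactly at the channel reserve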
+ assert_eq!(stat.value_to_self_msat - (stat.pending_outbound_htlcs_amount_msat + recv_value_21 + recv_value_22 + total_fee_msat + total_fee_msat), stat.channel_reserve_msat);
+ }
+
+ // now see if they go through on both sides
+ let (route_21, our_payment_hash_21, our_payment_preimage_21) = get_route_and_payment_hash!(recv_value_21);
+ // but this one will get stuck in the holding cell
+ nodes[0].node.send_payment(route_21, our_payment_hash_21).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 0);
+
+ // test with outbound holding cell amount > 0
+ {
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
+ match nodes[0].node.send_payment(route, our_payment_hash).err().unwrap() {
+ APIError::ChannelUnavailable{err} => assert_eq!(err, "Cannot send value that would put us over our reserve value"),
+ _ => panic!("Unknown error variants"),
+ }
+ }
+
+ let (route_22, our_payment_hash_22, our_payment_preimage_22) = get_route_and_payment_hash!(recv_value_22);
+ // this one will also get stuck in the holding cell
+ nodes[0].node.send_payment(route_22, our_payment_hash_22).unwrap();
+ check_added_monitors!(nodes[0], 0);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 0);
+
+ // flush the pending htlc
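+ // Completing the commitment dance for the first HTLC frees nodes[0]'s holding cell: handling
+ // nodes[1]'s revoke_and_ack below yields commitment_update_2 containing the two held HTLCs.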
+ let (as_revoke_and_ack, as_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event_1.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ let commitment_update_2 = nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack).unwrap().unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let (bs_revoke_and_ack, bs_none) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
+ assert!(bs_none.is_none());
+ check_added_monitors!(nodes[0], 1);
+ assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let ref payment_event_11 = expect_forward!(nodes[1]);
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_11.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[2], nodes[1], payment_event_11.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_payment_received!(nodes[2], our_payment_hash_1, recv_value_1);
+
+ // flush the htlcs in the holding cell
+ assert_eq!(commitment_update_2.update_add_htlcs.len(), 2);
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[0]).unwrap();
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &commitment_update_2.update_add_htlcs[1]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[0], &commitment_update_2.commitment_signed, false);
+ expect_pending_htlcs_forwardable!(nodes[1]);
+
+ let ref payment_event_3 = expect_forward!(nodes[1]);
+ assert_eq!(payment_event_3.msgs.len(), 2);
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[0]).unwrap();
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event_3.msgs[1]).unwrap();
+
+ commitment_signed_dance!(nodes[2], nodes[1], &payment_event_3.commitment_msg, false);
+ expect_pending_htlcs_forwardable!(nodes[2]);
+
+ let events = nodes[2].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(our_payment_hash_21, *payment_hash);
+ assert_eq!(recv_value_21, amt);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(our_payment_hash_22, *payment_hash);
+ assert_eq!(recv_value_22, amt);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_1);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_21);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), our_payment_preimage_22);
+
+ let expected_value_to_self = stat01.value_to_self_msat - (recv_value_1 + total_fee_msat) - (recv_value_21 + total_fee_msat) - (recv_value_22 + total_fee_msat);
+ let stat0 = get_channel_value_stat!(nodes[0], chan_1.2);
+ assert_eq!(stat0.value_to_self_msat, expected_value_to_self);
+ assert_eq!(stat0.value_to_self_msat, stat0.channel_reserve_msat);
+
+ let stat2 = get_channel_value_stat!(nodes[2], chan_2.2);
+ assert_eq!(stat2.value_to_self_msat, stat22.value_to_self_msat + recv_value_1 + recv_value_21 + recv_value_22);
+ }
+
+ #[test]
+ fn channel_monitor_network_test() {
+ // Simple test which builds a network of ChannelManagers, connects them to each other, and
+ // tests that ChannelMonitor is able to recover from various states.
+ let nodes = create_network(5);
+
+ // Create some initial channels
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+ let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2);
+ let chan_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
+ let chan_4 = create_announced_chan_between_nodes(&nodes, 3, 4);
+
+ // Rebalance the network a bit by relaying one payment through all the channels...
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
+ send_payment(&nodes[0], &vec!(&nodes[1], &nodes[2], &nodes[3], &nodes[4])[..], 8000000);
+
+ // Simple case with no pending HTLCs:
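+ // Disconnecting with no_connection_possible set should force-close the channel; nodes[1] is
+ // expected to broadcast its latest local commitment transaction below.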
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), true);
+ {
+ let mut node_txn = test_txn_broadcast(&nodes[1], &chan_1, None, HTLCType::NONE);
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
+ test_txn_broadcast(&nodes[0], &chan_1, None, HTLCType::NONE);
+ }
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ assert_eq!(nodes[1].node.list_channels().len(), 1);
+
+ // One pending HTLC is discarded by the force-close:
+ let payment_preimage_1 = route_payment(&nodes[1], &vec!(&nodes[2], &nodes[3])[..], 3000000).0;
+
+ // Simple case of one pending HTLC to HTLC-Timeout
+ nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id(), true);
+ {
+ let mut node_txn = test_txn_broadcast(&nodes[1], &chan_2, None, HTLCType::TIMEOUT);
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[2].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn.drain(..).next().unwrap()] }, 1);
+ test_txn_broadcast(&nodes[2], &chan_2, None, HTLCType::NONE);
+ }
+ get_announce_close_broadcast_events(&nodes, 1, 2);
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+ assert_eq!(nodes[2].node.list_channels().len(), 1);
+
+ macro_rules! claim_funds {
+ ($node: expr, $prev_node: expr, $preimage: expr) => {
+ {
+ assert!($node.node.claim_funds($preimage));
+ check_added_monitors!($node, 1);
+
+ let events = $node.node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::UpdateHTLCs { ref node_id, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fail_htlcs, .. } } => {
+ assert!(update_add_htlcs.is_empty());
+ assert!(update_fail_htlcs.is_empty());
+ assert_eq!(*node_id, $prev_node.node.get_our_node_id());
+ },
+ _ => panic!("Unexpected event"),
+ };
+ }
+ }
+ }
+
+ // nodes[3] gets the preimage, but nodes[2] already disconnected, resulting in a nodes[2]
+ // HTLC-Timeout and a nodes[3] claim against it (+ its own announces)
+ nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), true);
+ {
+ let node_txn = test_txn_broadcast(&nodes[2], &chan_3, None, HTLCType::TIMEOUT);
+
+ // Claim the payment on nodes[3], giving it knowledge of the preimage
+ claim_funds!(nodes[3], nodes[2], payment_preimage_1);
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[3].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, 1);
+
+ check_preimage_claim(&nodes[3], &node_txn);
+ }
+ get_announce_close_broadcast_events(&nodes, 2, 3);
+ assert_eq!(nodes[2].node.list_channels().len(), 0);
+ assert_eq!(nodes[3].node.list_channels().len(), 1);
+
+ { // Cheat and reset nodes[4]'s height to 1
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![] }, 1);
+ }
+
+ assert_eq!(nodes[3].node.latest_block_height.load(Ordering::Acquire), 1);
+ assert_eq!(nodes[4].node.latest_block_height.load(Ordering::Acquire), 1);
+ // One pending HTLC to time out:
+ let payment_preimage_2 = route_payment(&nodes[3], &vec!(&nodes[4])[..], 3000000).0;
+ // CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
+ // buffer space).
+
+ {
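+ // Connect blocks on nodes[3] until the HTLC's CLTV (plus the HTLC_FAIL_TIMEOUT_BLOCKS buffer)
+ // has passed, forcing it to broadcast its commitment tx with an HTLC-Timeout.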
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[3].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
+ for i in 3..TEST_FINAL_CLTV + 2 + HTLC_FAIL_TIMEOUT_BLOCKS + 1 {
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[3].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
+ }
+
+ let node_txn = test_txn_broadcast(&nodes[3], &chan_4, None, HTLCType::TIMEOUT);
+
+ // Claim the payment on nodes[4], giving it knowledge of the preimage
+ claim_funds!(nodes[4], nodes[3], payment_preimage_2);
+
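+ // Connect blocks on nodes[4] only until CLTV_CLAIM_BUFFER blocks remain before the HTLC
+ // expires, at which point it should claim on-chain via an HTLC-Success using the preimage.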
+ header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[4].chain_monitor.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
+ for i in 3..TEST_FINAL_CLTV + 2 - CLTV_CLAIM_BUFFER + 1 {
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[4].chain_monitor.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
+ }
+
+ test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
+
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[4].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[0].clone()] }, TEST_FINAL_CLTV - 5);
+
+ check_preimage_claim(&nodes[4], &node_txn);
+ }
+ get_announce_close_broadcast_events(&nodes, 3, 4);
+ assert_eq!(nodes[3].node.list_channels().len(), 0);
+ assert_eq!(nodes[4].node.list_channels().len(), 0);
+
+ // Create some new channels:
+ let chan_5 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ // A pending HTLC which will be revoked:
+ let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+ // Get the will-be-revoked local txn from nodes[0]
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.iter().next().unwrap().1.last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn.len(), 2); // First commitment tx, then HTLC tx
+ assert_eq!(revoked_local_txn[0].input.len(), 1);
+ assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_5.3.txid());
+ assert_eq!(revoked_local_txn[0].output.len(), 2); // Only the HTLC output and the output back to nodes[0] are present
+ assert_eq!(revoked_local_txn[1].input.len(), 1);
+ assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
+ assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout
+ // Revoke the old state
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_3);
+
+ {
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ {
+ let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 3);
+ assert_eq!(node_txn.pop().unwrap(), node_txn[0]); // An outpoint registration will result in a 2nd block_connected
+ assert_eq!(node_txn[0].input.len(), 2); // We should claim the revoked output and the HTLC output
+
+ check_spends!(node_txn[0], revoked_local_txn[0].clone());
+ node_txn.swap_remove(0);
+ }
+ test_txn_broadcast(&nodes[1], &chan_5, None, HTLCType::NONE);
+
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ let node_txn = test_txn_broadcast(&nodes[0], &chan_5, Some(revoked_local_txn[0].clone()), HTLCType::TIMEOUT);
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![node_txn[1].clone()] }, 1);
+ test_revoked_htlc_claim_txn_broadcast(&nodes[1], node_txn[1].clone());
+ }
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+ }
+
+ #[test]
+ fn revoked_output_claim() {
+ // Simple test to ensure a node will claim a revoked output when a stale remote commitment
+ // transaction is broadcast by its counterparty
+ let nodes = create_network(2);
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+ // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim the revoked output
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn.len(), 1);
+ // Only output is the full channel value back to nodes[0]:
+ assert_eq!(revoked_local_txn[0].output.len(), 1);
+ // Send a payment through, updating everyone's latest commitment txn
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 5000000);
+
+ // Inform nodes[1] that nodes[0] broadcast a stale tx
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 3); // nodes[1] will broadcast justice tx twice, and its own local state once
+
+ assert_eq!(node_txn[0], node_txn[2]);
+
+ check_spends!(node_txn[0], revoked_local_txn[0].clone());
+ check_spends!(node_txn[1], chan_1.3.clone());
+
+ // Inform nodes[0] that a watchtower cheated on its behalf, so it will force-close the chan
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+ }
+
+ #[test]
+ fn claim_htlc_outputs_shared_tx() {
+ // A node revokes an old state; the HTLCs haven't timed out yet, so they are claimed in a single shared justice tx
+ let nodes = create_network(2);
+
+ // Create some new channel:
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ // Rebalance the network to generate HTLCs in both directions
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+ // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx
+ let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+ let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
+
+ // Get the will-be-revoked local txn from nodes[0]
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+ assert_eq!(revoked_local_txn.len(), 2); // commitment tx + 1 HTLC-Timeout tx
+ assert_eq!(revoked_local_txn[0].input.len(), 1);
+ assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan_1.3.txid());
+ assert_eq!(revoked_local_txn[1].input.len(), 1);
+ assert_eq!(revoked_local_txn[1].input[0].previous_output.txid, revoked_local_txn[0].txid());
+ assert_eq!(revoked_local_txn[1].input[0].witness.last().unwrap().len(), 133); // HTLC-Timeout
+ check_spends!(revoked_local_txn[1], revoked_local_txn[0].clone());
+
+ // Revoke the old state
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
+
+ {
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 1);
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 4);
+
+ assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
+ check_spends!(node_txn[0], revoked_local_txn[0].clone());
+
+ assert_eq!(node_txn[0], node_txn[3]); // justice tx is duplicated due to block re-scanning
+
+ let mut witness_lens = BTreeSet::new();
+ witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
+ witness_lens.insert(node_txn[0].input[1].witness.last().unwrap().len());
+ witness_lens.insert(node_txn[0].input[2].witness.last().unwrap().len());
+ assert_eq!(witness_lens.len(), 3);
+ assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
+ assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
+ assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC
+
+ // Next nodes[1] broadcasts its current local tx state:
+ assert_eq!(node_txn[1].input.len(), 1);
+ assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); // Spending the funding tx's unique txout; tx broadcast by the ChannelManager
+
+ assert_eq!(node_txn[2].input.len(), 1);
+ let witness_script = node_txn[2].clone().input[0].witness.pop().unwrap();
+ assert_eq!(witness_script.len(), 133); // Spending an offered HTLC output
+ assert_eq!(node_txn[2].input[0].previous_output.txid, node_txn[1].txid());
+ assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
+ assert_ne!(node_txn[2].input[0].previous_output.txid, node_txn[0].input[1].previous_output.txid);
+ }
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+ }
+
+ #[test]
+ fn claim_htlc_outputs_single_tx() {
+ // A node revokes an old state; the HTLCs have timed out, so each is claimed in a separate justice tx
+ let nodes = create_network(2);
+
+ let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ // Rebalance the network to generate HTLCs in both directions
+ send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+ // nodes[0] is going to revoke an old state, thus nodes[1] should be able to claim both the offered and received HTLC outputs on top of the commitment tx, but this
+ // time as two different claim transactions, as we're going to time out the HTLCs given a high current block height
+ let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
+ let _payment_preimage_2 = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000).0;
+
+ // Get the will-be-revoked local txn from nodes[0]
+ let revoked_local_txn = nodes[0].node.channel_state.lock().unwrap().by_id.get(&chan_1.2).unwrap().last_local_commitment_txn.clone();
+
+ // Revoke the old state
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
+
+ {
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+
+ nodes[0].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+
+ nodes[1].chain_monitor.block_connected_with_filtering(&Block { header, txdata: vec![revoked_local_txn[0].clone()] }, 200);
+ let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 12); // ChannelManager: 2, ChannelMonitor: 10 = (1 revoked to_local claim + 2 revoked HTLC claims + 1 local commitment tx + 1 HTLC-timeout tx) * 2 (block re-scan)
+
+ assert_eq!(node_txn[0], node_txn[7]);
+ assert_eq!(node_txn[1], node_txn[8]);
+ assert_eq!(node_txn[2], node_txn[9]);
+ assert_eq!(node_txn[3], node_txn[10]);
+ assert_eq!(node_txn[4], node_txn[11]);
+ assert_eq!(node_txn[3], node_txn[5]); // local commitment tx + HTLC-timeout tx broadcast by the ChannelManager
+ assert_eq!(node_txn[4], node_txn[6]);
+
+ assert_eq!(node_txn[0].input.len(), 1);
+ assert_eq!(node_txn[1].input.len(), 1);
+ assert_eq!(node_txn[2].input.len(), 1);
+
+ let mut revoked_tx_map = HashMap::new();
+ revoked_tx_map.insert(revoked_local_txn[0].txid(), revoked_local_txn[0].clone());
+ node_txn[0].verify(&revoked_tx_map).unwrap();
+ node_txn[1].verify(&revoked_tx_map).unwrap();
+ node_txn[2].verify(&revoked_tx_map).unwrap();
+
+ let mut witness_lens = BTreeSet::new();
+ witness_lens.insert(node_txn[0].input[0].witness.last().unwrap().len());
+ witness_lens.insert(node_txn[1].input[0].witness.last().unwrap().len());
+ witness_lens.insert(node_txn[2].input[0].witness.last().unwrap().len());
+ assert_eq!(witness_lens.len(), 3);
+ assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
+ assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), 133); // revoked offered HTLC
+ assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), 138); // revoked received HTLC
+
+ assert_eq!(node_txn[3].input.len(), 1);
+ check_spends!(node_txn[3], chan_1.3.clone());
+
+ assert_eq!(node_txn[4].input.len(), 1);
+ let witness_script = node_txn[4].input[0].witness.last().unwrap();
+ assert_eq!(witness_script.len(), 133); // Spending an offered HTLC output
+ assert_eq!(node_txn[4].input[0].previous_output.txid, node_txn[3].txid());
+ assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[0].input[0].previous_output.txid);
+ assert_ne!(node_txn[4].input[0].previous_output.txid, node_txn[1].input[0].previous_output.txid);
+ }
+ get_announce_close_broadcast_events(&nodes, 0, 1);
+ assert_eq!(nodes[0].node.list_channels().len(), 0);
+ assert_eq!(nodes[1].node.list_channels().len(), 0);
+ }
+
+ #[test]
+ fn test_htlc_ignore_latest_remote_commitment() {
+ // Test that HTLC transactions spending the latest remote commitment transaction are simply
+ // ignored if we cannot claim them. This originally tickled an invalid unwrap().
+ let nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ route_payment(&nodes[0], &[&nodes[1]], 10000000);
+ nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
+ {
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
+ assert_eq!(flags & 0b10, 0b10);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 2);
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
+
+ {
+ let events = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
+ assert_eq!(flags & 0b10, 0b10);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+
+ // Duplicate the block_connected call since this may happen due to other listeners
+ // registering new transactions
+ nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&node_txn[0], &node_txn[1]], &[1; 2]);
+ }
+
+ #[test]
+ fn test_force_close_fail_back() {
+ // Check which HTLCs are failed-backwards on channel force-closure
+ let mut nodes = create_network(3);
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 1, 2);
+
+ let route = nodes[0].router.get_route(&nodes[2].node.get_our_node_id(), None, &Vec::new(), 1000000, 42).unwrap();
+
+ let (our_payment_preimage, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+
+ let mut payment_event = {
+ nodes[0].node.send_payment(route, our_payment_hash).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+
+ let events_1 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_1.len(), 1);
+ match events_1[0] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+
+ nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+ nodes[1].node.process_pending_htlc_forwards();
+
+ let mut events_2 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_2.len(), 1);
+ payment_event = SendEvent::from_event(events_2.remove(0));
+ assert_eq!(payment_event.msgs.len(), 1);
+
+ check_added_monitors!(nodes[1], 1);
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[2], 1);
+
+ // nodes[2] now has the latest commitment transaction, but hasn't revoked its previous
+ // state or updated nodes[1]'s state. Now force-close and broadcast that commitment/HTLC
+ // transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
+
+ nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
+ let events_3 = nodes[2].node.get_and_clear_pending_events();
+ assert_eq!(events_3.len(), 1);
+ match events_3[0] {
+ Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
+ assert_eq!(flags & 0b10, 0b10);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ let tx = {
+ let mut node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ // Note that we don't bother broadcasting the HTLC-Success transaction here as we don't
+ // have a use for it unless nodes[2] learns the preimage somehow; otherwise the funds
+ // will go back to nodes[1] upon timeout.
+ assert_eq!(node_txn.len(), 1);
+ node_txn.remove(0)
+ };
+
+ let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ nodes[1].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
+
+ let events_4 = nodes[1].node.get_and_clear_pending_events();
+ // Note no UpdateHTLCs event here from nodes[1] to nodes[0]!
+ assert_eq!(events_4.len(), 1);
+ match events_4[0] {
+ Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
+ assert_eq!(flags & 0b10, 0b10);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ // Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
+ {
+ let mut monitors = nodes[2].chan_monitor.simple_monitor.monitors.lock().unwrap();
+ monitors.get_mut(&OutPoint::new(Sha256dHash::from(&payment_event.commitment_msg.channel_id[..]), 0)).unwrap()
+ .provide_payment_preimage(&our_payment_hash, &our_payment_preimage);
+ }
+ nodes[2].chain_monitor.block_connected_checked(&header, 1, &[&tx], &[1]);
+ let node_txn = nodes[2].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 1);
+ assert_eq!(node_txn[0].input.len(), 1);
+ assert_eq!(node_txn[0].input[0].previous_output.txid, tx.txid());
+ assert_eq!(node_txn[0].lock_time, 0); // Must be an HTLC-Success
+ assert_eq!(node_txn[0].input[0].witness.len(), 5); // Must be an HTLC-Success
+
+ check_spends!(node_txn[0], tx);
+ }
+
+ #[test]
+ fn test_unconf_chan() {
+ // After creating a chan between nodes, we disconnect all blocks previously seen to force a channel close on nodes[0]'s side
+ let nodes = create_network(2);
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ let channel_state = nodes[0].node.channel_state.lock().unwrap();
+ assert_eq!(channel_state.by_id.len(), 1);
+ assert_eq!(channel_state.short_to_id.len(), 1);
+ mem::drop(channel_state);
+
+ let mut headers = Vec::new();
+ let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ headers.push(header.clone());
+ for _i in 2..100 {
+ header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
+ headers.push(header.clone());
+ }
+ while !headers.is_empty() {
+ nodes[0].node.block_disconnected(&headers.pop().unwrap());
+ }
+ {
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ Event::BroadcastChannelUpdate { msg: msgs::ChannelUpdate { contents: msgs::UnsignedChannelUpdate { flags, .. }, .. } } => {
+ assert_eq!(flags & 0b10, 0b10);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+ let channel_state = nodes[0].node.channel_state.lock().unwrap();
+ assert_eq!(channel_state.by_id.len(), 0);
+ assert_eq!(channel_state.short_to_id.len(), 0);
+ }
+
+ /// pending_htlc_adds includes both the holding cell and in-flight update_add_htlcs, whereas
+ /// for claims/fails they are separated out.
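+ /// pending_raa indicates, per (node_a, node_b), whether that node should receive a re-sent
+ /// revoke_and_ack on reconnect.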
+ fn reconnect_nodes(node_a: &Node, node_b: &Node, pre_all_htlcs: bool, pending_htlc_adds: (i64, i64), pending_htlc_claims: (usize, usize), pending_cell_htlc_claims: (usize, usize), pending_cell_htlc_fails: (usize, usize), pending_raa: (bool, bool)) {
+ let reestablish_1 = node_a.node.peer_connected(&node_b.node.get_our_node_id());
+ let reestablish_2 = node_b.node.peer_connected(&node_a.node.get_our_node_id());
+
+ let mut resp_1 = Vec::new();
+ for msg in reestablish_1 {
+ resp_1.push(node_b.node.handle_channel_reestablish(&node_a.node.get_our_node_id(), &msg).unwrap());
+ }
+ if pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
+ check_added_monitors!(node_b, 1);
+ } else {
+ check_added_monitors!(node_b, 0);
+ }
+
+ let mut resp_2 = Vec::new();
+ for msg in reestablish_2 {
+ resp_2.push(node_a.node.handle_channel_reestablish(&node_b.node.get_our_node_id(), &msg).unwrap());
+ }
+ if pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
+ check_added_monitors!(node_a, 1);
+ } else {
+ check_added_monitors!(node_a, 0);
+ }
+
+ // We don't yet support both sides needing updates, as that would require a different commitment dance:
+ assert!((pending_htlc_adds.0 == 0 && pending_htlc_claims.0 == 0 && pending_cell_htlc_claims.0 == 0 && pending_cell_htlc_fails.0 == 0) ||
+ (pending_htlc_adds.1 == 0 && pending_htlc_claims.1 == 0 && pending_cell_htlc_claims.1 == 0 && pending_cell_htlc_fails.1 == 0));
+
+ for chan_msgs in resp_1.drain(..) {
+ if pre_all_htlcs {
+ let _announcement_sigs_opt = node_a.node.handle_funding_locked(&node_b.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
+ //TODO: Test announcement_sigs re-sending when we've implemented it
+ } else {
+ assert!(chan_msgs.0.is_none());
+ }
+ if pending_raa.0 {
+ assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
+ check_added_monitors!(node_a, 1);
+ } else {
+ assert!(chan_msgs.1.is_none());
+ }
+ if pending_htlc_adds.0 != 0 || pending_htlc_claims.0 != 0 || pending_cell_htlc_claims.0 != 0 || pending_cell_htlc_fails.0 != 0 {
+ let commitment_update = chan_msgs.2.unwrap();
+ if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
+ assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.0 as usize);
+ } else {
+ assert!(commitment_update.update_add_htlcs.is_empty());
+ }
+ assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.0 + pending_cell_htlc_claims.0);
+ assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.0);
+ assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
+ for update_add in commitment_update.update_add_htlcs {
+ node_a.node.handle_update_add_htlc(&node_b.node.get_our_node_id(), &update_add).unwrap();
+ }
+ for update_fulfill in commitment_update.update_fulfill_htlcs {
+ node_a.node.handle_update_fulfill_htlc(&node_b.node.get_our_node_id(), &update_fulfill).unwrap();
+ }
+ for update_fail in commitment_update.update_fail_htlcs {
+ node_a.node.handle_update_fail_htlc(&node_b.node.get_our_node_id(), &update_fail).unwrap();
+ }
+
+ if pending_htlc_adds.0 != -1 { // We use -1 to denote a response commitment_signed
+ commitment_signed_dance!(node_a, node_b, commitment_update.commitment_signed, false);
+ } else {
+ let (as_revoke_and_ack, as_commitment_signed) = node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
+ check_added_monitors!(node_a, 1);
+ assert!(as_commitment_signed.is_none());
+ assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(node_b, 1);
+ }
+ } else {
+ assert!(chan_msgs.2.is_none());
+ }
+ }
+
+ for chan_msgs in resp_2.drain(..) {
+ if pre_all_htlcs {
+ let _announcement_sigs_opt = node_b.node.handle_funding_locked(&node_a.node.get_our_node_id(), &chan_msgs.0.unwrap()).unwrap();
+ //TODO: Test announcement_sigs re-sending when we've implemented it
+ } else {
+ assert!(chan_msgs.0.is_none());
+ }
+ if pending_raa.1 {
+ assert!(node_b.node.handle_revoke_and_ack(&node_a.node.get_our_node_id(), &chan_msgs.1.unwrap()).unwrap().is_none());
+ check_added_monitors!(node_b, 1);
+ } else {
+ assert!(chan_msgs.1.is_none());
+ }
+ if pending_htlc_adds.1 != 0 || pending_htlc_claims.1 != 0 || pending_cell_htlc_claims.1 != 0 || pending_cell_htlc_fails.1 != 0 {
+ let commitment_update = chan_msgs.2.unwrap();
+ if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+ assert_eq!(commitment_update.update_add_htlcs.len(), pending_htlc_adds.1 as usize);
+ }
+ assert_eq!(commitment_update.update_fulfill_htlcs.len(), pending_htlc_claims.1 + pending_cell_htlc_claims.1);
+ assert_eq!(commitment_update.update_fail_htlcs.len(), pending_cell_htlc_fails.1);
+ assert!(commitment_update.update_fail_malformed_htlcs.is_empty());
+ for update_add in commitment_update.update_add_htlcs {
+ node_b.node.handle_update_add_htlc(&node_a.node.get_our_node_id(), &update_add).unwrap();
+ }
+ for update_fulfill in commitment_update.update_fulfill_htlcs {
+ node_b.node.handle_update_fulfill_htlc(&node_a.node.get_our_node_id(), &update_fulfill).unwrap();
+ }
+ for update_fail in commitment_update.update_fail_htlcs {
+ node_b.node.handle_update_fail_htlc(&node_a.node.get_our_node_id(), &update_fail).unwrap();
+ }
+
+ if pending_htlc_adds.1 != -1 { // We use -1 to denote a response commitment_signed
+ commitment_signed_dance!(node_b, node_a, commitment_update.commitment_signed, false);
+ } else {
+ let (bs_revoke_and_ack, bs_commitment_signed) = node_b.node.handle_commitment_signed(&node_a.node.get_our_node_id(), &commitment_update.commitment_signed).unwrap();
+ check_added_monitors!(node_b, 1);
+ assert!(bs_commitment_signed.is_none());
+ assert!(node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(node_a, 1);
+ }
+ } else {
+ assert!(chan_msgs.2.is_none());
+ }
+ }
+ }
+
+ #[test]
+ fn test_simple_peer_disconnect() {
+ // Test that we can reconnect when there are no lost messages
+ let nodes = create_network(3);
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 1, 2);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
+ let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
+ fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_2);
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_1);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
+ let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
+ let payment_hash_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
+ let payment_hash_6 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ claim_payment_along_route(&nodes[0], &vec!(&nodes[1], &nodes[2]), true, payment_preimage_3);
+ fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
+
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
+ {
+ let events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ Event::PaymentSent { payment_preimage } => {
+ assert_eq!(payment_preimage, payment_preimage_3);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ Event::PaymentFailed { payment_hash } => {
+ assert_eq!(payment_hash, payment_hash_5);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ }
+
+ claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4);
+ fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6);
+ }
+
+ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
+ // Test that we can reconnect when in-flight HTLC updates get dropped
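+ // messages_delivered counts how far the initial HTLC's commitment dance got before the
+ // disconnect: 0 = nodes[1] never got funding_locked, 1 = channel open but the update_add was
+ // dropped, 2 = update_add + commitment_signed delivered, 3 = nodes[1]'s RAA delivered,
+ // 4 = nodes[1]'s commitment_signed delivered, 5 = everything delivered.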
+ let mut nodes = create_network(2);
+ if messages_delivered == 0 {
+ create_chan_between_nodes_with_value_a(&nodes[0], &nodes[1], 100000, 10001);
+ // nodes[1] doesn't receive the funding_locked message (it'll be re-sent on reconnect)
+ } else {
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ }
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+
+ let payment_event = {
+ nodes[0].node.send_payment(route.clone(), payment_hash_1).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ let mut events = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ SendEvent::from_event(events.remove(0))
+ };
+ assert_eq!(nodes[1].node.get_our_node_id(), payment_event.node_id);
+
+ if messages_delivered < 2 {
+ // Drop the payment_event messages, and let them get re-generated in reconnect_nodes!
+ } else {
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]).unwrap();
+ let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg).unwrap();
+ check_added_monitors!(nodes[1], 1);
+
+ if messages_delivered >= 3 {
+ assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[0], 1);
+
+ if messages_delivered >= 4 {
+ let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_commitment_signed.unwrap()).unwrap();
+ assert!(as_commitment_signed.is_none());
+ check_added_monitors!(nodes[0], 1);
+
+ if messages_delivered >= 5 {
+ assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[1], 1);
+ }
+ }
+ }
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ if messages_delivered < 2 {
+ // Even if the funding_locked messages get exchanged, as long as nothing further was
+ // received on either side, both sides will need to resend them.
+ reconnect_nodes(&nodes[0], &nodes[1], true, (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
+ } else if messages_delivered == 2 {
+ // nodes[0] still wants its RAA + commitment_signed
+ reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
+ } else if messages_delivered == 3 {
+ // nodes[0] still wants its commitment_signed
+ reconnect_nodes(&nodes[0], &nodes[1], false, (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ } else if messages_delivered == 4 {
+ // nodes[1] still wants its final RAA
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+ } else if messages_delivered == 5 {
+ // Everything was delivered...
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
+
+ let events_1 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_1.len(), 1);
+ match events_1[0] {
+ Event::PendingHTLCsForwardable { .. } => { },
+ _ => panic!("Unexpected event"),
+ };
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ nodes[1].node.channel_state.lock().unwrap().next_forward = Instant::now();
+ nodes[1].node.process_pending_htlc_forwards();
+
+ let events_2 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_2.len(), 1);
+ match events_2[0] {
+ Event::PaymentReceived { ref payment_hash, amt } => {
+ assert_eq!(payment_hash_1, *payment_hash);
+ assert_eq!(amt, 1000000);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ nodes[1].node.claim_funds(payment_preimage_1);
+ check_added_monitors!(nodes[1], 1);
+
+ let events_3 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_3.len(), 1);
+ let (update_fulfill_htlc, commitment_signed) = match events_3[0] {
+ Event::UpdateHTLCs { ref node_id, ref updates } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ assert!(updates.update_add_htlcs.is_empty());
+ assert!(updates.update_fail_htlcs.is_empty());
+ assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+ assert!(updates.update_fail_malformed_htlcs.is_empty());
+ assert!(updates.update_fee.is_none());
+ (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
+ },
+ _ => panic!("Unexpected event"),
+ };
+
+ if messages_delivered >= 1 {
+ nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_htlc).unwrap();
+
+ let events_4 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_4.len(), 1);
+ match events_4[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(payment_preimage_1, *payment_preimage);
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ if messages_delivered >= 2 {
+ let (as_revoke_and_ack, as_commitment_signed) = nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed).unwrap();
+ check_added_monitors!(nodes[0], 1);
+
+ if messages_delivered >= 3 {
+ assert!(nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ if messages_delivered >= 4 {
+ let (bs_revoke_and_ack, bs_commitment_signed) = nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_commitment_signed.unwrap()).unwrap();
+ assert!(bs_commitment_signed.is_none());
+ check_added_monitors!(nodes[1], 1);
+
+ if messages_delivered >= 5 {
+ assert!(nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_revoke_and_ack).unwrap().is_none());
+ check_added_monitors!(nodes[0], 1);
+ }
+ }
+ }
+ }
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ if messages_delivered < 2 {
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+ //TODO: Deduplicate PaymentSent events, then enable this if:
+ //if messages_delivered < 1 {
+ let events_4 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_4.len(), 1);
+ match events_4[0] {
+ Event::PaymentSent { ref payment_preimage } => {
+ assert_eq!(payment_preimage_1, *payment_preimage);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ //}
+ } else if messages_delivered == 2 {
+ // nodes[0] still wants its RAA + commitment_signed
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
+ } else if messages_delivered == 3 {
+ // nodes[0] still wants its commitment_signed
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
+ } else if messages_delivered == 4 {
+ // nodes[1] still wants its final RAA
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+ } else if messages_delivered == 5 {
+ // Everything was delivered...
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ }
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], false, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ // Channel should still work fine...
+ let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
+ }
+
+ #[test]
+ fn test_drop_messages_peer_disconnect_a() {
+ do_test_drop_messages_peer_disconnect(0);
+ do_test_drop_messages_peer_disconnect(1);
+ do_test_drop_messages_peer_disconnect(2);
+ }
+
+ #[test]
+ fn test_drop_messages_peer_disconnect_b() {
+ do_test_drop_messages_peer_disconnect(3);
+ do_test_drop_messages_peer_disconnect(4);
+ do_test_drop_messages_peer_disconnect(5);
+ }
+
+ #[test]
+ fn test_funding_peer_disconnect() {
+ // Test that we can lock in our funding tx while disconnected
+ let nodes = create_network(2);
+ let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ confirm_transaction(&nodes[0].chain_monitor, &tx, tx.version);
+ let events_1 = nodes[0].node.get_and_clear_pending_events();
+ assert_eq!(events_1.len(), 1);
+ match events_1[0] {
+ Event::SendFundingLocked { ref node_id, msg: _, ref announcement_sigs } => {
+ assert_eq!(*node_id, nodes[1].node.get_our_node_id());
+ assert!(announcement_sigs.is_none());
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ confirm_transaction(&nodes[1].chain_monitor, &tx, tx.version);
+ let events_2 = nodes[1].node.get_and_clear_pending_events();
+ assert_eq!(events_2.len(), 1);
+ match events_2[0] {
+ Event::SendFundingLocked { ref node_id, msg: _, ref announcement_sigs } => {
+ assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+ assert!(announcement_sigs.is_none());
+ },
+ _ => panic!("Unexpected event"),
+ }
+
+ reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+ reconnect_nodes(&nodes[0], &nodes[1], true, (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+
+ // TODO: We shouldn't need to manually pass list_usable_channels here once we support
+ // rebroadcasting announcement_signatures upon reconnect.
+
+ let route = nodes[0].router.get_route(&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV).unwrap();
+ let (payment_preimage, _) = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000);
+ claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);
+ }
+
+ #[test]
+ fn test_invalid_channel_announcement() {
+ // Test the BOLT 7 channel_announcement msg requirements for the final node; gather data to build custom channel_announcement msgs
+ let secp_ctx = Secp256k1::new();
+ let nodes = create_network(2);
+
+ let chan_announcement = create_chan_between_nodes(&nodes[0], &nodes[1]);
+
+ let a_channel_lock = nodes[0].node.channel_state.lock().unwrap();
+ let b_channel_lock = nodes[1].node.channel_state.lock().unwrap();
+ let as_chan = a_channel_lock.by_id.get(&chan_announcement.3).unwrap();
+ let bs_chan = b_channel_lock.by_id.get(&chan_announcement.3).unwrap();
+
+ let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
+
+ let as_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &as_chan.get_local_keys().funding_key);
+ let bs_bitcoin_key = PublicKey::from_secret_key(&secp_ctx, &bs_chan.get_local_keys().funding_key);
+
+ let as_network_key = nodes[0].node.get_our_node_id();
+ let bs_network_key = nodes[1].node.get_our_node_id();
+
+ let were_node_one = as_bitcoin_key.serialize()[..] < bs_bitcoin_key.serialize()[..];
+
+ let mut chan_announcement;
+
+ macro_rules! dummy_unsigned_msg {
+ () => {
+ msgs::UnsignedChannelAnnouncement {
+ features: msgs::GlobalFeatures::new(),
+ chain_hash: genesis_block(Network::Testnet).header.bitcoin_hash(),
+ short_channel_id: as_chan.get_short_channel_id().unwrap(),
+ node_id_1: if were_node_one { as_network_key } else { bs_network_key },
+ node_id_2: if were_node_one { bs_network_key } else { as_network_key },
+ bitcoin_key_1: if were_node_one { as_bitcoin_key } else { bs_bitcoin_key },
+ bitcoin_key_2: if were_node_one { bs_bitcoin_key } else { as_bitcoin_key },
+ excess_data: Vec::new(),
+ };
+ }
+ }
+
+ macro_rules! sign_msg {
+ ($unsigned_msg: expr) => {
+ let msghash = Message::from_slice(&Sha256dHash::from_data(&$unsigned_msg.encode()[..])[..]).unwrap();
+ let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_local_keys().funding_key);
+ let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_local_keys().funding_key);
+ let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].node.our_network_key);
+ let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].node.our_network_key);
+ chan_announcement = msgs::ChannelAnnouncement {
+ node_signature_1 : if were_node_one { as_node_sig } else { bs_node_sig},
+ node_signature_2 : if were_node_one { bs_node_sig } else { as_node_sig},
+ bitcoin_signature_1: if were_node_one { as_bitcoin_sig } else { bs_bitcoin_sig },
+ bitcoin_signature_2 : if were_node_one { bs_bitcoin_sig } else { as_bitcoin_sig },
+ contents: $unsigned_msg
+ }
+ }
+ }
+
+ let unsigned_msg = dummy_unsigned_msg!();
+ sign_msg!(unsigned_msg);
+ assert_eq!(nodes[0].router.handle_channel_announcement(&chan_announcement).unwrap(), true);
+ let _ = nodes[0].router.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
+
+ // Configured with Network::Testnet
+ let mut unsigned_msg = dummy_unsigned_msg!();
+ unsigned_msg.chain_hash = genesis_block(Network::Bitcoin).header.bitcoin_hash();
+ sign_msg!(unsigned_msg);
+ assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());
+
+ let mut unsigned_msg = dummy_unsigned_msg!();
+ unsigned_msg.chain_hash = Sha256dHash::from_data(&[1,2,3,4,5,6,7,8,9]);
+ sign_msg!(unsigned_msg);
+ assert!(nodes[0].router.handle_channel_announcement(&chan_announcement).is_err());