X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=be23e522e001f9c002981dc2e1c187c73b2c15a3;hb=f438778715dc41d95a211c16aef01cbea72e5b77;hp=d866693ffec93a73dc54ac085bedbf66db663da9;hpb=0b4079df9a2d4fd7b178f7441c5fb89d79e40a4d;p=rust-lightning

diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index d866693f..be23e522 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -3577,7 +3577,7 @@ fn test_dup_events_on_peer_disconnect() {
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 }
@@ -3593,7 +3593,7 @@ fn test_simple_peer_disconnect() {
 
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
 	let payment_hash_2 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).1;
@@ -3602,7 +3602,7 @@ fn test_simple_peer_disconnect() {
 
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
 	let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[2])[..], 1000000).0;
@@ -3615,7 +3615,7 @@ fn test_simple_peer_disconnect() {
 	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[2]]], true, payment_preimage_3);
 	fail_payment_along_route(&nodes[0], &[&nodes[1], &nodes[2]], true, payment_hash_5);
 
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (1, 0), (1, 0), (false, false));
 	{
 		let events = nodes[0].node.get_and_clear_pending_events();
 		assert_eq!(events.len(), 2);
@@ -3719,19 +3719,19 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 		}
 		// Even if the funding_locked messages get exchanged, as long as nothing further was
 		// received on either side, both sides will need to resend them.
-		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	} else if messages_delivered == 3 {
 		// nodes[0] still wants its RAA + commitment_signed
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (true, false));
+		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
 	} else if messages_delivered == 4 {
 		// nodes[0] still wants its commitment_signed
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (-1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
	} else if messages_delivered == 5 {
		// nodes[1] still wants its final RAA
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
+		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
 	} else if messages_delivered == 6 {
 		// Everything was delivered...
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	}
 
 	let events_1 = nodes[1].node.get_and_clear_pending_events();
@@ -3743,7 +3743,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	nodes[1].node.process_pending_htlc_forwards();
@@ -3823,7 +3823,7 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
 	if messages_delivered < 2 {
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
 		if messages_delivered < 1 {
 			let events_4 = nodes[0].node.get_and_clear_pending_events();
 			assert_eq!(events_4.len(), 1);
@@ -3838,21 +3838,21 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8, simulate_broken
 		}
 	} else if messages_delivered == 2 {
 		// nodes[0] still wants its RAA + commitment_signed
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, true));
+		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, true));
 	} else if messages_delivered == 3 {
 		// nodes[0] still wants its commitment_signed
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, -1), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	} else if messages_delivered == 4 {
 		// nodes[1] still wants its final RAA
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
+		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (true, false));
 	} else if messages_delivered == 5 {
 		// Everything was delivered...
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	}
 
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	// Channel should still work fine...
 	let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
@@ -3904,7 +3904,7 @@ fn test_funding_peer_disconnect() {
 		_ => panic!("Unexpected event"),
 	}
 
-	reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[1], (false, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
@@ -3927,7 +3927,7 @@ fn test_funding_peer_disconnect() {
 		_ => panic!("Unexpected event"),
 	};
 
-	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	nodes[0].node.handle_funding_locked(&nodes[1].node.get_our_node_id(), &funding_locked);
 	nodes[0].node.handle_announcement_signatures(&nodes[1].node.get_our_node_id(), &bs_announcement_sigs);
@@ -4005,7 +4005,7 @@ fn test_funding_peer_disconnect() {
 	nodes[0].node = &nodes_0_deserialized;
 	check_added_monitors!(nodes[0], 1);
 
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	// as_announcement should be re-generated exactly by broadcast_node_announcement.
 	nodes[0].node.broadcast_node_announcement([0, 0, 0], [0; 32], Vec::new());
@@ -4746,7 +4746,7 @@ fn test_simple_manager_serialize_deserialize() {
 	nodes[0].node = &nodes_0_deserialized;
 	check_added_monitors!(nodes[0], 1);
 
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 
 	fail_payment(&nodes[0], &[&nodes[1]], our_payment_hash);
 	claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
@@ -4860,8 +4860,8 @@ fn test_manager_serialize_deserialize_inconsistent_monitor() {
 	nodes[0].node = &nodes_0_deserialized;
 
 	// nodes[1] and nodes[2] have no lost state with nodes[0]...
-	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
-	reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
 	//... and we can even still claim the payment!
 	claim_payment(&nodes[2], &[&nodes[0], &nodes[1]], our_payment_preimage);
@@ -5329,16 +5329,11 @@ fn test_onchain_to_onchain_claim() {
 	// So we broadcast C's commitment tx and HTLC-Success on B's chain, we should successfully be able to extract preimage and update downstream monitor
 	let header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
 	connect_block(&nodes[1], &Block { header, txdata: vec![c_txn[1].clone(), c_txn[2].clone()]});
-	connect_blocks(&nodes[1], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
 	{
 		let mut b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-		// ChannelMonitor: claim tx, ChannelManager: local commitment tx
-		assert_eq!(b_txn.len(), 2);
+		// ChannelMonitor: claim tx
+		assert_eq!(b_txn.len(), 1);
 		check_spends!(b_txn[0], chan_2.3); // B local commitment tx, issued by ChannelManager
-		check_spends!(b_txn[1], c_txn[1]); // timeout tx on C remote commitment tx, issued by ChannelMonitor
-		assert_eq!(b_txn[1].input[0].witness.clone().last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
-		assert!(b_txn[1].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
-		assert_ne!(b_txn[1].lock_time, 0); // Timeout tx
 		b_txn.clear();
 	}
 	check_added_monitors!(nodes[1], 1);
@@ -5367,19 +5362,14 @@ fn test_onchain_to_onchain_claim() {
 	let commitment_tx = get_local_commitment_txn!(nodes[0], chan_1.2);
 	mine_transaction(&nodes[1], &commitment_tx[0]);
 	let b_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
-	// ChannelMonitor: HTLC-Success tx + HTLC-Timeout RBF Bump, ChannelManager: local commitment tx + HTLC-Success tx
-	assert_eq!(b_txn.len(), 4);
-	check_spends!(b_txn[2], chan_1.3);
-	check_spends!(b_txn[3], b_txn[2]);
-	let (htlc_success_claim, htlc_timeout_bumped) =
-		if b_txn[0].input[0].previous_output.txid == commitment_tx[0].txid()
-			{ (&b_txn[0], &b_txn[1]) } else { (&b_txn[1], &b_txn[0]) };
-	check_spends!(htlc_success_claim, commitment_tx[0]);
-	assert_eq!(htlc_success_claim.input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
-	assert!(htlc_success_claim.output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
-	assert_eq!(htlc_success_claim.lock_time, 0); // Success tx
-	check_spends!(htlc_timeout_bumped, c_txn[1]); // timeout tx on C remote commitment tx, issued by ChannelMonitor
-	assert_ne!(htlc_timeout_bumped.lock_time, 0); // Success tx
+	// ChannelMonitor: HTLC-Success tx, ChannelManager: local commitment tx + HTLC-Success tx
+	assert_eq!(b_txn.len(), 3);
+	check_spends!(b_txn[1], chan_1.3);
+	check_spends!(b_txn[2], b_txn[1]);
+	check_spends!(b_txn[0], commitment_tx[0]);
+	assert_eq!(b_txn[0].input[0].witness.clone().last().unwrap().len(), OFFERED_HTLC_SCRIPT_WEIGHT);
+	assert!(b_txn[0].output[0].script_pubkey.is_v0_p2wpkh()); // direct payment
+	assert_eq!(b_txn[0].lock_time, 0); // Success tx
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);