X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchanmon_update_fail_tests.rs;h=8f4ebdc58726130ac091c45a8154d1c89f29e18e;hb=ec5e837cc2390573fa4273381c182a174ebfe027;hp=e59cf47f17600963c9852389fc5723d8189816ef;hpb=17a74fcfc74936b48923bba5a416be73a01d2367;p=rust-lightning

diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index e59cf47f..8f4ebdc5 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -70,7 +70,8 @@ fn test_simple_monitor_permanent_update_fail() {
 	// PaymentPathFailed event
 
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
-	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
+		[nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -178,7 +179,9 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	if disconnect {
 		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-		reconnect_nodes(&nodes[0], &nodes[1], (true, true), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+		reconnect_args.send_channel_ready = (true, true);
+		reconnect_nodes(reconnect_args);
 	}
 
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
@@ -233,7 +236,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	if disconnect {
 		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 		nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-		reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 	}
 
 	// ...and make sure we can force-close a frozen channel
@@ -245,7 +248,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 	// PaymentPathFailed event
 
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
-	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -350,10 +353,14 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 1);
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	assert_eq!(reestablish_2.len(), 1);
 
@@ -372,10 +379,14 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 1);
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	assert_eq!(reestablish_2.len(), 1);
 
@@ -1136,8 +1147,12 @@ fn test_monitor_update_fail_reestablish() {
 	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
 
 	let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
 	let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
@@ -1155,8 +1170,12 @@ fn test_monitor_update_fail_reestablish() {
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
 
 	assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish);
 	assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish);
@@ -1331,8 +1350,12 @@ fn claim_while_disconnected_monitor_update_fail() {
 	check_added_monitors!(nodes[1], 1);
 	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
 
 	let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
 	let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
@@ -1436,12 +1459,12 @@ fn monitor_failed_no_reestablish_response() {
 	{
 		let mut node_0_per_peer_lock;
 		let mut node_0_peer_state_lock;
-		get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+		get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
 	}
 	{
 		let mut node_1_per_peer_lock;
 		let mut node_1_peer_state_lock;
-		get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).announcement_sigs_state = AnnouncementSigsState::PeerReceived;
+		get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
 	}
 
 	// Route the payment and deliver the initial commitment_signed (with a monitor update failure
@@ -1467,8 +1490,12 @@ fn monitor_failed_no_reestablish_response() {
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
 
 	let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
 	let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
@@ -1901,7 +1928,9 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 	// Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
-	reconnect_nodes(&nodes[0], &nodes[1], (false, confirm_a_first), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false));
+	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
+	reconnect_args.send_channel_ready.1 = confirm_a_first;
+	reconnect_nodes(reconnect_args);
 
 	// But we want to re-emit ChannelPending
 	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
@@ -1959,8 +1988,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 
 	send_payment(&nodes[0], &[&nodes[1]], 8000000);
 	close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
-	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
-	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2076,9 +2105,13 @@ fn test_pending_update_fee_ack_on_reconnect() {
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
 	let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
 	let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
@@ -2156,7 +2189,7 @@ fn test_fail_htlc_on_broadcast_after_claim() {
 	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);
 
 	mine_transaction(&nodes[1], &bs_txn[0]);
-	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
 	check_closed_broadcast!(nodes[1], true);
 	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 	check_added_monitors!(nodes[1], 1);
@@ -2204,9 +2237,13 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
 	nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
 	let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap();
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
 	let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg);
@@ -2314,6 +2351,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 		RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
 	check_added_monitors!(nodes[0], 0);
 
+	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[0].node.claim_funds(payment_preimage_0);
@@ -2333,18 +2371,23 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 	// disconnect the peers. Note that the fuzzer originally found this issue because
 	// deserializing a ChannelManager in this state causes an assertion failure.
 	if reload_a {
-		let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 		reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
+		persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+		persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	} else {
 		nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 	}
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
 
 	// Now reconnect the two
-	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: nodes[1].node.init_features(), remote_network_address: None }, true).unwrap();
+	nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init {
+		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
+	}, true).unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 1);
-	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
+	nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
+		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
+	}, false).unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	assert_eq!(reestablish_2.len(), 1);
 
@@ -2370,9 +2413,14 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 		assert_eq!(pending_cs.commitment_signed, cs);
 	} else { panic!(); }
 
-	// There should be no monitor updates as we are still pending awaiting a failed one.
-	check_added_monitors!(nodes[0], 0);
-	check_added_monitors!(nodes[1], 0);
+	if reload_a {
+		// The two pending monitor updates were replayed (but are still pending).
+		check_added_monitors(&nodes[0], 2);
+	} else {
+		// There should be no monitor updates as we are still pending awaiting a failed one.
+		check_added_monitors(&nodes[0], 0);
+	}
+	check_added_monitors(&nodes[1], 0);
 }
 
 // If we finish updating the monitor, we should free the holding cell right away (this did
@@ -2532,10 +2580,14 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 	nodes[2].node.peer_disconnected(&nodes[1].node.get_our_node_id());
 
 	if second_fails {
-		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0), (false, false));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+		reconnect_args.pending_htlc_fails.0 = 1;
+		reconnect_nodes(reconnect_args);
 		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
 	} else {
-		reconnect_nodes(&nodes[1], &nodes[2], (false, false), (0, 0), (1, 0), (0, 0), (0, 0), (0, 0), (false, false));
+		let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
+		reconnect_args.pending_htlc_claims.0 = 1;
+		reconnect_nodes(reconnect_args);
 	}
 
 	if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
@@ -2615,8 +2667,8 @@ fn test_temporary_error_during_shutdown() {
 	assert_eq!(txn_a, txn_b);
 	assert_eq!(txn_a.len(), 1);
 	check_spends!(txn_a[0], funding_tx);
-	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
-	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2645,7 +2697,8 @@ fn test_permanent_error_during_sending_shutdown() {
 	if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
 
 	check_added_monitors!(nodes[0], 2);
-	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+	check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
+		[nodes[1].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2676,7 +2729,8 @@ fn test_permanent_error_during_handling_shutdown() {
 	if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
 
 	check_added_monitors!(nodes[1], 2);
-	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
+		[nodes[0].node.get_our_node_id()], 100000);
 }
 
 #[test]
@@ -2870,7 +2924,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
 	nodes[0].chain_source.watched_outputs.lock().unwrap().clear();
 
 	reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
-	check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
+	check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 100000);
 	assert!(nodes[0].node.list_channels().is_empty());
 }
 
@@ -2957,7 +3011,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo
 
 	reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);
 
-	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 100000);
 	assert!(nodes[1].node.list_channels().is_empty());
 }
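
For reference, the hunks above mechanically migrate three test-utility APIs. Below is a minimal sketch of the new calling conventions, assembled only from names that appear in the hunks; the `ReconnectArgs` field defaults are assumed to mean "nothing pending", and the trailing `100000` is the channel value in sats these tests open. It is illustrative, not an exhaustive description of either API.

	// reconnect_nodes() now takes a ReconnectArgs struct rather than nine
	// positional tuples: construct defaults, then set only what the test needs.
	let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]);
	reconnect_args.send_channel_ready = (true, true); // both sides re-send channel_ready
	reconnect_args.pending_htlc_claims.0 = 1;         // the first node replays one HTLC claim
	reconnect_nodes(reconnect_args);

	// check_closed_event! now also asserts the counterparty node id(s) and the
	// channel capacity carried by the ChannelClosed event.
	check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed,
		[nodes[1].node.get_our_node_id()], 100000);

	// msgs::Init gained a `networks` field; every call site in this file passes None.
	let init_msg = msgs::Init {
		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
	};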