X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=eb7d868a95d05becd024058ab40a4969aa396217;hb=f5b6e7f58a108ae2394bda4ef98650cb4a50597e;hp=ca4fa3b3563baf8cc041886429cebe554d17c5ee;hpb=79541b11e8b6e62de0fc613f416e30bf1de5f3d9;p=rust-lightning

diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index ca4fa3b3..eb7d868a 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -12,7 +12,7 @@
 //! claim outputs on-chain.
 
 use chain;
-use chain::{Confirm, Listen, Watch};
+use chain::{Confirm, Listen, Watch, ChannelMonitorUpdateErr};
 use chain::channelmonitor;
 use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
 use chain::transaction::OutPoint;
@@ -25,6 +25,7 @@ use ln::{chan_utils, onion_utils};
 use ln::chan_utils::HTLC_SUCCESS_TX_WEIGHT;
 use routing::network_graph::{NetworkUpdate, RoutingFees};
 use routing::router::{Route, RouteHop, RouteHint, RouteHintHop, get_route, get_keysend_route};
+use routing::scorer::Scorer;
 use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
 use ln::msgs;
 use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
@@ -4099,21 +4100,15 @@ fn test_no_txn_manager_serialize_deserialize() {
 	send_payment(&nodes[0], &[&nodes[1]], 1000000);
 }
 
-#[test]
-fn test_dup_htlc_onchain_fails_on_reload() {
+fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool) {
 	// When a Channel is closed, any outbound HTLCs which were relayed through it are simply
 	// dropped when the Channel is. From there, the ChannelManager relies on the ChannelMonitor
 	// having a copy of the relevant fail-/claim-back data and processes the HTLC fail/claim when
 	// the ChannelMonitor tells it to.
 	//
-	// If, due to an on-chain event, an HTLC is failed/claimed, and then we serialize the
-	// ChannelManager, we generally expect there not to be a duplicate HTLC fail/claim (eg via a
-	// PaymentPathFailed event appearing). However, because we may not serialize the relevant
-	// ChannelMonitor at the same time, this isn't strictly guaranteed. In order to provide this
-	// consistency, the ChannelManager explicitly tracks pending-onchain-resolution outbound HTLCs
-	// and de-duplicates ChannelMonitor events.
-	//
-	// This tests that explicit tracking behavior.
+	// If, due to an on-chain event, an HTLC is failed/claimed, we should avoid providing the
+	// ChannelManager the HTLC event until after the monitor is re-persisted. This should prevent a
+	// duplicate HTLC fail/claim (e.g. via a PaymentPathFailed event).
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -4122,7 +4117,7 @@ fn test_dup_htlc_onchain_fails_on_reload() {
 	let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-	let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
+	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
 	// Route a payment, but force-close the channel before the HTLC fulfill message arrives at
 	// nodes[0].
@@ -4140,35 +4135,59 @@ fn test_dup_htlc_onchain_fails_on_reload() {
 	let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(node_txn.len(), 3);
 	assert_eq!(node_txn[0], node_txn[1]);
+	check_spends!(node_txn[1], funding_tx);
+	check_spends!(node_txn[2], node_txn[1]);
 
 	assert!(nodes[1].node.claim_funds(payment_preimage));
 	check_added_monitors!(nodes[1], 1);
 
 	let mut header = BlockHeader { version: 0x20000000, prev_blockhash: nodes[1].best_block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
-	connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
+	connect_block(&nodes[1], &Block { header, txdata: vec![node_txn[1].clone()]});
 	check_closed_broadcast!(nodes[1], true);
 	check_added_monitors!(nodes[1], 1);
 	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 	let claim_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 
 	header.prev_blockhash = nodes[0].best_block_hash();
-	connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone(), node_txn[2].clone()]});
+	connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[1].clone()]});
 
-	// Serialize out the ChannelMonitor before connecting the on-chain claim transactions. This is
-	// fairly normal behavior as ChannelMonitor(s) are often not re-serialized when on-chain events
-	// happen, unlike ChannelManager which tends to be re-serialized after any relevant event(s).
-	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
-	get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
+	// Now connect the HTLC claim transaction with the ChainMonitor-generated ChannelMonitor update
+	// returning TemporaryFailure. This should cause the claim event to never make its way to the
+	// ChannelManager.
+	chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+	chanmon_cfgs[0].persister.set_update_ret(Err(ChannelMonitorUpdateErr::TemporaryFailure));
 
 	header.prev_blockhash = nodes[0].best_block_hash();
-	let claim_block = Block { header, txdata: claim_txn};
+	let claim_block = Block { header, txdata: claim_txn };
 	connect_block(&nodes[0], &claim_block);
-	expect_payment_sent!(nodes[0], payment_preimage);
 
-	// ChannelManagers generally get re-serialized after any relevant event(s). Since we just
-	// connected a highly-relevant block, it likely gets serialized out now.
+	let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
+	let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
+		.get_mut(&funding_txo).unwrap().drain().collect();
+	assert_eq!(mon_updates.len(), 1);
+	assert!(nodes[0].chain_monitor.release_pending_monitor_events().is_empty());
+	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+
+	// If we persist the ChannelManager here, we should get the PaymentSent event after
+	// deserialization.
 	let mut chan_manager_serialized = test_utils::TestVecWriter(Vec::new());
-	nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+	if !persist_manager_post_event {
+		nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+	}
+
+	// Now persist the ChannelMonitor and inform the ChainMonitor that we're done, generating the
+	// payment sent event.
+	chanmon_cfgs[0].persister.set_update_ret(Ok(()));
+	let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
+	get_monitor!(nodes[0], chan_id).write(&mut chan_0_monitor_serialized).unwrap();
+	nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, mon_updates[0]).unwrap();
+	expect_payment_sent!(nodes[0], payment_preimage);
+
+	// If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it
+	// twice.
+	if persist_manager_post_event {
+		nodes[0].node.write(&mut chan_manager_serialized).unwrap();
+	}
 
 	// Now reload nodes[0]...
 	persister = test_utils::TestPersister::new();
@@ -4200,6 +4219,12 @@ fn test_dup_htlc_onchain_fails_on_reload() {
 	check_added_monitors!(nodes[0], 1);
 	nodes[0].node = &nodes_0_deserialized;
 
+	if persist_manager_post_event {
+		assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
+	} else {
+		expect_payment_sent!(nodes[0], payment_preimage);
+	}
+
 	// Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but
 	// which the current ChannelMonitor has not seen), the ChannelManager's de-duplication of
 	// payment events should kick in, leaving us with no pending events here.
@@ -4208,6 +4233,12 @@ fn test_dup_htlc_onchain_fails_on_reload() {
 	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 }
 
+#[test]
+fn test_dup_htlc_onchain_fails_on_reload() {
+	do_test_dup_htlc_onchain_fails_on_reload(true);
+	do_test_dup_htlc_onchain_fails_on_reload(false);
+}
+
 #[test]
 fn test_manager_serialize_deserialize_events() {
 	// This test makes sure the events field in ChannelManager survives de/serialization
@@ -7267,7 +7298,8 @@ fn test_check_htlc_underpaying() {
 	// Create some initial channels
 	create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
-	let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 10_000, TEST_FINAL_CLTV, nodes[0].logger).unwrap();
+	let scorer = Scorer::new(0);
+	let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph, &nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer).unwrap();
 	let (_, our_payment_hash, _) = get_payment_preimage_hash!(nodes[0]);
 	let our_payment_secret = nodes[1].node.create_inbound_payment_for_hash(our_payment_hash, Some(100_000), 7200, 0).unwrap();
 	nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
@@ -7664,11 +7696,12 @@ fn test_bump_penalty_txn_on_revoked_htlcs() {
 	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
 
 	// Lock HTLC in both directions (using a slightly lower CLTV delay to provide timely RBF bumps)
+	let scorer = Scorer::new(0);
 	let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph,
-		&nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3_000_000, 50, nodes[0].logger).unwrap();
+		&nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3_000_000, 50, nodes[0].logger, &scorer).unwrap();
 	let payment_preimage = send_along_route(&nodes[0], route, &[&nodes[1]], 3_000_000).0;
 	let route = get_route(&nodes[1].node.get_our_node_id(), &nodes[1].net_graph_msg_handler.network_graph,
-		&nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3_000_000, 50, nodes[0].logger).unwrap();
+		&nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 3_000_000, 50, nodes[0].logger, &scorer).unwrap();
 	send_along_route(&nodes[1], route, &[&nodes[0]], 3_000_000);
 
 	let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
@@ -9157,8 +9190,9 @@ fn test_keysend_payments_to_public_node() {
 	let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
 	let payer_pubkey = nodes[0].node.get_our_node_id();
 	let payee_pubkey = nodes[1].node.get_our_node_id();
+	let scorer = Scorer::new(0);
 	let route = get_keysend_route(
-		&payer_pubkey, &network_graph, &payee_pubkey, None, &vec![], 10000, 40, nodes[0].logger
+		&payer_pubkey, &network_graph, &payee_pubkey, None, &vec![], 10000, 40, nodes[0].logger, &scorer
 	).unwrap();
 
 	let test_preimage = PaymentPreimage([42; 32]);
@@ -9187,9 +9221,10 @@ fn test_keysend_payments_to_private_node() {
 	let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
 	let network_graph = &nodes[0].net_graph_msg_handler.network_graph;
 	let first_hops = nodes[0].node.list_usable_channels();
+	let scorer = Scorer::new(0);
 	let route = get_keysend_route(
 		&payer_pubkey, &network_graph, &payee_pubkey, Some(&first_hops.iter().collect::<Vec<_>>()),
-		&vec![], 10000, 40, nodes[0].logger
+		&vec![], 10000, 40, nodes[0].logger, &scorer
 	).unwrap();
 
 	let test_preimage = PaymentPreimage([42; 32]);
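
Note on the routing hunks above: every call to get_route and get_keysend_route gains a trailing scorer argument, imported via "use routing::scorer::Scorer;". The fragment below is a minimal sketch of the new call shape, lifted from the test changes in this diff rather than a standalone program; the meaning of the 0 passed to Scorer::new (assumed here to be a flat per-channel penalty in millisatoshis) is not spelled out by the diff itself.

	// Construct a scorer as the tests above do; the `0` argument is assumed to be a
	// base penalty applied to each candidate channel when scoring routes.
	let scorer = Scorer::new(0);
	// The scorer is passed by reference as a new trailing parameter; every other
	// argument keeps its previous position and meaning.
	let route = get_route(&nodes[0].node.get_our_node_id(), &nodes[0].net_graph_msg_handler.network_graph,
		&nodes[1].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(),
		10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer).unwrap();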