X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Ffunctional_tests.rs;h=f44a8d59e0981c75d2207163e0a7f8aaf317a89f;hb=6df9129ace609bfb5c7f08ae0f41175126d05b1b;hp=ce9ec1d5853adc9851fc4475de83b7f312570877;hpb=b19d4475cbfb4c784d7f2ba3125baf31c81d4df0;p=rust-lightning

diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index ce9ec1d5..f44a8d59 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -1,3 +1,12 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
 //! Tests that test standing up a network of ChannelManagers, creating channels, sending
 //! payments/messages between them, and often checking the resulting ChannelMonitors are able to
 //! claim outputs on-chain.
@@ -15,7 +24,7 @@ use ln::{chan_utils, onion_utils};
 use routing::router::{Route, RouteHop, get_route};
 use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use ln::msgs;
-use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, ErrorAction};
+use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, ErrorAction, OptionalField};
 use util::enforcing_trait_impls::EnforcingChannelKeys;
 use util::{byte_utils, test_utils};
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
@@ -52,6 +61,7 @@ use std::sync::atomic::Ordering;
 use std::{mem, io};
 
 use ln::functional_test_utils::*;
+use ln::chan_utils::PreCalculatedTxCreationKeys;
 
 #[test]
 fn test_insane_channel_opens() {
@@ -1694,8 +1704,8 @@ fn test_fee_spike_violation_fails_htlc() {
 		let local_chan_lock = nodes[0].node.channel_state.lock().unwrap();
 		let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap();
 		let local_chan_keys = local_chan.get_local_keys();
-		local_chan_keys.sign_remote_commitment(feerate_per_kw, &commit_tx, &commit_tx_keys, &[&accepted_htlc_info],
-			BREAKDOWN_TIMEOUT, &secp_ctx).unwrap()
+		let pre_commit_tx_keys = PreCalculatedTxCreationKeys::new(commit_tx_keys);
+		local_chan_keys.sign_remote_commitment(feerate_per_kw, &commit_tx, &pre_commit_tx_keys, &[&accepted_htlc_info], &secp_ctx).unwrap()
 	};
 
 	let commit_signed_msg = msgs::CommitmentSigned {
@@ -1883,6 +1893,25 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
 	check_added_monitors!(nodes[1], 1);
 }
 
+#[test]
+fn test_inbound_outbound_capacity_is_not_zero() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
+	let channels0 = node_chanmgrs[0].list_channels();
+	let channels1 = node_chanmgrs[1].list_channels();
+	assert_eq!(channels0.len(), 1);
+	assert_eq!(channels1.len(), 1);
+
+	assert_eq!(channels0[0].inbound_capacity_msat, 95000000);
+	assert_eq!(channels1[0].outbound_capacity_msat, 95000000);
+
+	assert_eq!(channels0[0].outbound_capacity_msat, 100000 * 1000 - 95000000);
+	assert_eq!(channels1[0].inbound_capacity_msat, 100000 * 1000 - 95000000);
+}
+
 fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64) -> u64 {
 	(COMMITMENT_TX_BASE_WEIGHT + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
 }
@@ -2391,13 +2420,21 @@ fn channel_monitor_network_test() {
 	// CLTV expires at TEST_FINAL_CLTV + 1 (current height) + 1 (added in send_payment for
 	// buffer space).
 
-	{
+	let (close_chan_update_1, close_chan_update_2) = {
 		let mut header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 		nodes[3].block_notifier.block_connected_checked(&header, 2, &Vec::new()[..], &[0; 0]);
 		for i in 3..TEST_FINAL_CLTV + 2 + LATENCY_GRACE_PERIOD_BLOCKS + 1 {
 			header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 			nodes[3].block_notifier.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
 		}
+		let events = nodes[3].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let close_chan_update_1 = match events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+				msg.clone()
+			},
+			_ => panic!("Unexpected event"),
+		};
 		check_added_monitors!(nodes[3], 1);
 
 		// Clear bumped claiming txn spending node 2 commitment tx. Bumped txn are generated after reaching some height timer.
@@ -2422,7 +2459,14 @@ fn channel_monitor_network_test() {
 			header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 			nodes[4].block_notifier.block_connected_checked(&header, i, &Vec::new()[..], &[0; 0]);
 		}
-
+		let events = nodes[4].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let close_chan_update_2 = match events[0] {
+			MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+				msg.clone()
+			},
+			_ => panic!("Unexpected event"),
+		};
 		check_added_monitors!(nodes[4], 1);
 
 		test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
@@ -2430,8 +2474,10 @@ fn channel_monitor_network_test() {
 		header = BlockHeader { version: 0x20000000, prev_blockhash: header.bitcoin_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
 		nodes[4].block_notifier.block_connected(&Block { header, txdata: vec![node_txn[0].clone()] }, TEST_FINAL_CLTV - 5);
 		check_preimage_claim(&nodes[4], &node_txn);
-	}
-	get_announce_close_broadcast_events(&nodes, 3, 4);
+		(close_chan_update_1, close_chan_update_2)
+	};
+	nodes[3].net_graph_msg_handler.handle_channel_update(&close_chan_update_2).unwrap();
+	nodes[4].net_graph_msg_handler.handle_channel_update(&close_chan_update_1).unwrap();
 	assert_eq!(nodes[3].node.list_channels().len(), 0);
 	assert_eq!(nodes[4].node.list_channels().len(), 0);
 }
@@ -3579,7 +3625,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
 	let logger = test_utils::TestLogger::new();
 	let payment_event = {
 		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(),
+			&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
+			&Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 		nodes[0].node.send_payment(&route, payment_hash_1, &None).unwrap();
 		check_added_monitors!(nodes[0], 1);
 
@@ -3754,7 +3802,9 @@ fn do_test_drop_messages_peer_disconnect(messages_delivered: u8) {
 
 	// Channel should still work fine...
 	let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
-	let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels()), &Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
+	let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(),
+		&nodes[1].node.get_our_node_id(), Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
+		&Vec::new(), 1000000, TEST_FINAL_CLTV, &logger).unwrap();
 	let payment_preimage_2 = send_along_route(&nodes[0], route, &[&nodes[1]], 1000000).0;
 	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2, 1_000_000);
 }
@@ -4622,7 +4672,7 @@ macro_rules! check_spendable_outputs {
 				match *outp {
 					SpendableOutputDescriptor::StaticOutputRemotePayment { ref outpoint, ref output, ref key_derivation_params } => {
 						let input = TxIn {
-							previous_output: outpoint.clone(),
+							previous_output: outpoint.into_bitcoin_outpoint(),
 							script_sig: Script::new(),
 							sequence: 0,
 							witness: Vec::new(),
@@ -4650,7 +4700,7 @@ macro_rules! check_spendable_outputs {
 					},
 					SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref per_commitment_point, ref to_self_delay, ref output, ref key_derivation_params, ref remote_revocation_pubkey } => {
 						let input = TxIn {
-							previous_output: outpoint.clone(),
+							previous_output: outpoint.into_bitcoin_outpoint(),
 							script_sig: Script::new(),
 							sequence: *to_self_delay as u32,
 							witness: Vec::new(),
@@ -4683,7 +4733,7 @@ macro_rules! check_spendable_outputs {
 					SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => {
 						let secp_ctx = Secp256k1::new();
 						let input = TxIn {
-							previous_output: outpoint.clone(),
+							previous_output: outpoint.into_bitcoin_outpoint(),
 							script_sig: Script::new(),
 							sequence: 0,
 							witness: Vec::new(),
@@ -6058,6 +6108,7 @@ impl msgs::ChannelUpdate {
 			flags: 0,
 			cltv_expiry_delta: 0,
 			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Absent,
 			fee_base_msat: 0,
 			fee_proportional_millionths: 0,
 			excess_data: vec![],
@@ -6387,6 +6438,342 @@ fn bolt2_open_channel_sending_node_checks_part2() {
 	assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
 }
 
+// Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
+// originated from our node, its failure is surfaced to the user. We trigger this failure to
+// free the HTLC by increasing our fee while the HTLC is in the holding cell such that the HTLC
+// is no longer affordable once it's freed.
+#[test]
+fn test_fail_holding_cell_htlc_upon_free() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
+	let logger = test_utils::TestLogger::new();
+
+	// First nodes[0] generates an update_fee, setting the channel's
+	// pending_update_fee.
+	nodes[0].node.update_fee(chan.2, get_feerate!(nodes[0], chan.2) + 20).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let (update_msg, commitment_signed) = match events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+			(update_fee.as_ref(), commitment_signed)
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
+
+	let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2);
+	let channel_reserve = chan_stat.channel_reserve_msat;
+	let feerate = get_feerate!(nodes[0], chan.2);
+
+	// 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
+	let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+	let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1);
+	let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+	let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], max_can_send, TEST_FINAL_CLTV, &logger).unwrap();
+
+	// Send a payment which passes reserve checks but gets stuck in the holding cell.
+	nodes[0].node.send_payment(&route, our_payment_hash, &None).unwrap();
+	chan_stat = get_channel_value_stat!(nodes[0], chan.2);
+	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
+
+	// Flush the pending fee update.
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
+	let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	check_added_monitors!(nodes[1], 1);
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
+	check_added_monitors!(nodes[0], 1);
+
+	// Upon receipt of the RAA, there will be an attempt to resend the holding cell
+	// HTLC, but now that the fee has been raised the payment will now fail, causing
+	// us to surface its failure to the user.
+	chan_stat = get_channel_value_stat!(nodes[0], chan.2);
+	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
+	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), "Freeing holding cell with 1 HTLC updates".to_string(), 1);
+	let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put us under local channel reserve value ({})", log_bytes!(our_payment_hash.0), chan_stat.channel_reserve_msat);
+	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
+
+	// Check that the payment failed to be sent out.
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match &events[0] {
+		&Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref error_code, ref error_data } => {
+			assert_eq!(our_payment_hash.clone(), *payment_hash);
+			assert_eq!(*rejected_by_dest, false);
+			assert_eq!(*error_code, None);
+			assert_eq!(*error_data, None);
+		},
+		_ => panic!("Unexpected event"),
+	}
+}
+
+// Test that if multiple HTLCs are released from the holding cell and one is
+// valid but the other is no longer valid upon release, the valid HTLC can be
+// successfully completed while the other one fails as expected.
+#[test]
+fn test_free_and_fail_holding_cell_htlcs() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
+	let logger = test_utils::TestLogger::new();
+
+	// First nodes[0] generates an update_fee, setting the channel's
+	// pending_update_fee.
+	nodes[0].node.update_fee(chan.2, get_feerate!(nodes[0], chan.2) + 200).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let (update_msg, commitment_signed) = match events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+			(update_fee.as_ref(), commitment_signed)
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
+
+	let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2);
+	let channel_reserve = chan_stat.channel_reserve_msat;
+	let feerate = get_feerate!(nodes[0], chan.2);
+
+	// 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
+	let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+	let amt_1 = 20000;
+	let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+	let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1) - amt_1;
+	let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+	let route_1 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], amt_1, TEST_FINAL_CLTV, &logger).unwrap();
+	let route_2 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], amt_2, TEST_FINAL_CLTV, &logger).unwrap();
+
+	// Send 2 payments which pass reserve checks but get stuck in the holding cell.
+	nodes[0].node.send_payment(&route_1, payment_hash_1, &None).unwrap();
+	chan_stat = get_channel_value_stat!(nodes[0], chan.2);
+	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
+	nodes[0].node.send_payment(&route_2, payment_hash_2, &None).unwrap();
+	chan_stat = get_channel_value_stat!(nodes[0], chan.2);
+	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
+
+	// Flush the pending fee update.
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
+	let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	check_added_monitors!(nodes[1], 1);
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack);
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
+	check_added_monitors!(nodes[0], 2);
+
+	// Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs,
+	// but now that the fee has been raised the second payment will now fail, causing us
+	// to surface its failure to the user. The first payment should succeed.
+	chan_stat = get_channel_value_stat!(nodes[0], chan.2);
+	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
+	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), "Freeing holding cell with 2 HTLC updates".to_string(), 1);
+	let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put us under local channel reserve value ({})", log_bytes!(payment_hash_2.0), chan_stat.channel_reserve_msat);
+	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
+
+	// Check that the second payment failed to be sent out.
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match &events[0] {
+		&Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref error_code, ref error_data } => {
+			assert_eq!(payment_hash_2.clone(), *payment_hash);
+			assert_eq!(*rejected_by_dest, false);
+			assert_eq!(*error_code, None);
+			assert_eq!(*error_data, None);
+		},
+		_ => panic!("Unexpected event"),
+	}
+
+	// Complete the first payment and the RAA from the fee update.
+	let (payment_event, send_raa_event) = {
+		let mut msgs = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(msgs.len(), 2);
+		(SendEvent::from_event(msgs.remove(0)), msgs.remove(0))
+	};
+	let raa = match send_raa_event {
+		MessageSendEvent::SendRevokeAndACK { msg, .. } => msg,
+		_ => panic!("Unexpected event"),
+	};
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
+	check_added_monitors!(nodes[1], 1);
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+	let events = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
+		Event::PendingHTLCsForwardable { .. } => {},
+		_ => panic!("Unexpected event"),
+	}
+	nodes[1].node.process_pending_htlc_forwards();
+	let events = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
+		Event::PaymentReceived { .. } => {},
+		_ => panic!("Unexpected event"),
+	}
+	nodes[1].node.claim_funds(payment_preimage_1, &None, amt_1);
+	check_added_monitors!(nodes[1], 1);
+	let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
+	commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
+		Event::PaymentSent { ref payment_preimage } => {
+			assert_eq!(*payment_preimage, payment_preimage_1);
+		}
+		_ => panic!("Unexpected event"),
+	}
+}
+
+// Test that if we fail to forward an HTLC that is being freed from the holding cell that the
+// HTLC is failed backwards. We trigger this failure to forward the freed HTLC by increasing
+// our fee while the HTLC is in the holding cell such that the HTLC is no longer affordable
+// once it's freed.
+#[test]
+fn test_fail_holding_cell_htlc_upon_free_multihop() {
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+	let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
+	let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
+	let logger = test_utils::TestLogger::new();
+
+	// First nodes[1] generates an update_fee, setting the channel's
+	// pending_update_fee.
+	nodes[1].node.update_fee(chan_1_2.2, get_feerate!(nodes[1], chan_1_2.2) + 20).unwrap();
+	check_added_monitors!(nodes[1], 1);
+
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let (update_msg, commitment_signed) = match events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+			(update_fee.as_ref(), commitment_signed)
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
+
+	let mut chan_stat = get_channel_value_stat!(nodes[0], chan_0_1.2);
+	let channel_reserve = chan_stat.channel_reserve_msat;
+	let feerate = get_feerate!(nodes[0], chan_0_1.2);
+
+	// Send a payment which passes reserve checks but gets stuck in the holding cell.
+	let feemsat = 239;
+	let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
+	let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+	let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1) - total_routing_fee_msat;
+	let payment_event = {
+		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), None, &[], max_can_send, TEST_FINAL_CLTV, &logger).unwrap();
+		nodes[0].node.send_payment(&route, our_payment_hash, &None).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+
+		SendEvent::from_event(events.remove(0))
+	};
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	check_added_monitors!(nodes[1], 0);
+	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+
+	chan_stat = get_channel_value_stat!(nodes[1], chan_1_2.2);
+	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
+
+	// Flush the pending fee update.
+	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
+	let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	check_added_monitors!(nodes[2], 1);
+	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
+	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
+	check_added_monitors!(nodes[1], 2);
+
+	// A final RAA message is generated to finalize the fee update.
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+
+	let raa_msg = match &events[0] {
+		&MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
+			msg.clone()
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
+	check_added_monitors!(nodes[2], 1);
+	assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
+	// nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
+	let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(process_htlc_forwards_event.len(), 1);
+	match &process_htlc_forwards_event[0] {
+		&Event::PendingHTLCsForwardable { .. } => {},
+		_ => panic!("Unexpected event"),
+	}
+
+	// In response, we call ChannelManager's process_pending_htlc_forwards
+	nodes[1].node.process_pending_htlc_forwards();
+	check_added_monitors!(nodes[1], 1);
+
+	// This causes the HTLC to be failed backwards.
+	let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(fail_event.len(), 1);
+	let (fail_msg, commitment_signed) = match &fail_event[0] {
+		&MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
+			assert_eq!(updates.update_add_htlcs.len(), 0);
+			assert_eq!(updates.update_fulfill_htlcs.len(), 0);
+			assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
+			assert_eq!(updates.update_fail_htlcs.len(), 1);
+			(updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	// Pass the failure messages back to nodes[0].
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
+
+	// Complete the HTLC failure+removal process.
+	let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	check_added_monitors!(nodes[0], 1);
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
+	check_added_monitors!(nodes[1], 2);
+	let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(final_raa_event.len(), 1);
+	let raa = match &final_raa_event[0] {
+		&MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
+		_ => panic!("Unexpected event"),
+	};
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
+	let fail_msg_event = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(fail_msg_event.len(), 1);
+	match &fail_msg_event[0] {
+		&MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+		_ => panic!("Unexpected event"),
+	}
+	let failure_event = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(failure_event.len(), 1);
+	match &failure_event[0] {
+		&Event::PaymentFailed { rejected_by_dest, .. } => {
+			assert!(!rejected_by_dest);
+		},
+		_ => panic!("Unexpected event"),
+	}
+	check_added_monitors!(nodes[0], 1);
+}
+
 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
 //TODO: I don't believe this is explicitly enforced when sending an HTLC but as the Fee aspect of the BOLT specs is in flux leaving this as a TODO.
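
The new holding-cell tests in this diff all budget the same fee spike buffer: twice the commitment transaction fee, counted with one extra HTLC, is subtracted along with the channel reserve before choosing an HTLC amount. Below is a minimal standalone sketch of that arithmetic, not part of the patch; the weight constants and the reserve/feerate numbers are assumed illustrative values (the real COMMITMENT_TX_BASE_WEIGHT and COMMITMENT_TX_WEIGHT_PER_HTLC are defined in ln::channel).

// Assumed weight constants for illustration only; see ln::channel for the real values.
const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

// Mirrors the helper in the diff: commitment-tx fee in msat, rounded down to a whole satoshi,
// for a commitment transaction carrying `num_htlcs` HTLCs at `feerate` (sat per 1000 weight).
fn commit_tx_fee_msat(feerate: u32, num_htlcs: u64) -> u64 {
	(COMMITMENT_TX_BASE_WEIGHT + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate as u64 / 1000 * 1000
}

fn main() {
	// Example numbers in the spirit of the tests: a 100_000 sat channel with 95_000_000 msat
	// pushed to the counterparty leaves 5_000_000 msat on our side.
	let our_balance_msat = 5_000_000u64;
	let channel_reserve_msat = 1_000_000u64; // hypothetical reserve, for illustration
	let feerate = 253u32;                    // hypothetical feerate_per_kw, for illustration

	// The tests reserve 2x the commitment fee, counted with one extra (+1) HTLC, as the fee
	// spike buffer, so the largest HTLC that still passes the reserve check is:
	let max_can_send = our_balance_msat - channel_reserve_msat - 2 * commit_tx_fee_msat(feerate, 1 + 1);
	println!("max sendable with fee spike buffer: {} msat", max_can_send);
}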