diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 0200ece2..cead8feb 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -1,3 +1,12 @@
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
 //! Tests that test standing up a network of ChannelManagers, creating channels, sending
 //! payments/messages between them, and often checking the resulting ChannelMonitors are able to
 //! claim outputs on-chain.
@@ -15,7 +24,7 @@ use ln::{chan_utils, onion_utils};
 use routing::router::{Route, RouteHop, get_route};
 use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
 use ln::msgs;
-use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, ErrorAction};
+use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, ErrorAction, OptionalField};
 use util::enforcing_trait_impls::EnforcingChannelKeys;
 use util::{byte_utils, test_utils};
 use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
@@ -43,6 +52,8 @@ use bitcoin::hashes::Hash;
 use bitcoin::secp256k1::{Secp256k1, Message};
 use bitcoin::secp256k1::key::{PublicKey,SecretKey};
 
+use regex;
+
 use std::collections::{BTreeSet, HashMap, HashSet};
 use std::default::Default;
 use std::sync::{Arc, Mutex};
@@ -50,6 +61,7 @@ use std::sync::atomic::Ordering;
 use std::{mem, io};
 
 use ln::functional_test_utils::*;
+use ln::chan_utils::PreCalculatedTxCreationKeys;
 
 #[test]
 fn test_insane_channel_opens() {
@@ -77,10 +89,11 @@ fn test_insane_channel_opens() {
 	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &message_mutator(open_channel_message.clone()));
 	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(msg_events.len(), 1);
+	let expected_regex = regex::Regex::new(expected_error_str).unwrap();
 	if let MessageSendEvent::HandleError { ref action, .. } = msg_events[0] {
 		match action {
 			&ErrorAction::SendErrorMessage { .. } => {
-				nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), expected_error_str.to_string(), 1);
+				nodes[1].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), expected_regex, 1);
 			},
 			_ => panic!("unexpected event!"),
 		}
 	}
@@ -91,23 +104,23 @@ use ln::channelmanager::MAX_LOCAL_BREAKDOWN_TIMEOUT;

 	// Test all mutations that would make the channel open message insane
-	insane_open_helper("funding value > 2^24", |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg });
It was {}", MAX_FUNDING_SATOSHIS, MAX_FUNDING_SATOSHIS).as_str(), |mut msg| { msg.funding_satoshis = MAX_FUNDING_SATOSHIS; msg }); insane_open_helper("Bogus channel_reserve_satoshis", |mut msg| { msg.channel_reserve_satoshis = msg.funding_satoshis + 1; msg }); - insane_open_helper("push_msat larger than funding value", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg }); + insane_open_helper(r"push_msat \d+ was larger than funding value \d+", |mut msg| { msg.push_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000 + 1; msg }); insane_open_helper("Peer never wants payout outputs?", |mut msg| { msg.dust_limit_satoshis = msg.funding_satoshis + 1 ; msg }); - insane_open_helper("Bogus; channel reserve is less than dust limit", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg }); + insane_open_helper(r"Bogus; channel reserve \(\d+\) is less than dust limit \(\d+\)", |mut msg| { msg.dust_limit_satoshis = msg.channel_reserve_satoshis + 1; msg }); - insane_open_helper("Minimum htlc value is full channel value", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg }); + insane_open_helper(r"Minimum htlc value \(\d+\) was larger than full channel value \(\d+\)", |mut msg| { msg.htlc_minimum_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000; msg }); insane_open_helper("They wanted our payments to be delayed by a needlessly long period", |mut msg| { msg.to_self_delay = MAX_LOCAL_BREAKDOWN_TIMEOUT + 1; msg }); - insane_open_helper("0 max_accpted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg }); + insane_open_helper("0 max_accepted_htlcs makes for a useless channel", |mut msg| { msg.max_accepted_htlcs = 0; msg }); - insane_open_helper("max_accpted_htlcs > 483", |mut msg| { msg.max_accepted_htlcs = 484; msg }); + insane_open_helper("max_accepted_htlcs was 484. 
 #[test]
@@ -1270,7 +1283,7 @@ fn fake_network_test() {
 	route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
 	let events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 0);
-	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1);
+	nodes[0].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap(), 1);

 	//TODO: Test that routes work again here as we've been notified that the channel is full

@@ -1321,10 +1334,10 @@ fn holding_cell_htlc_counting() {
 	{
 		let net_graph_msg_handler = &nodes[1].net_graph_msg_handler;
 		let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), None, &Vec::new(), 100000, TEST_FINAL_CLTV, &logger).unwrap();
-		unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash_1, &None), true, APIError::ChannelUnavailable { err },
-			assert_eq!(err, "Cannot push more than their max accepted HTLCs"));
+		unwrap_send_err!(nodes[1].node.send_payment(&route, payment_hash_1, &None), true, APIError::ChannelUnavailable { ref err },
+			assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err)));
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-		nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
+		nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1);
 	}

 	// This should also be true if we try to forward a payment.

@@ -1540,16 +1553,16 @@ fn test_basic_channel_reserve() {
 	let err = nodes[0].node.send_payment(&route, our_payment_hash, &None).err().unwrap();
 	match err {
 		PaymentSendFailure::AllFailedRetrySafe(ref fails) => {
-			match fails[0] {
-				APIError::ChannelUnavailable{err} =>
-					assert_eq!(err, "Cannot send value that would put us under local channel reserve value"),
+			match &fails[0] {
+				&APIError::ChannelUnavailable{ref err} =>
+					assert!(regex::Regex::new(r"Cannot send value that would put us under local channel reserve value \(\d+\)").unwrap().is_match(err)),
 				_ => panic!("Unexpected error variant"),
 			}
 		},
 		_ => panic!("Unexpected error variant"),
 	}
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us under local channel reserve value".to_string(), 1);
+	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us under local channel reserve value".to_string(), 1);

 	send_payment(&nodes[0], &vec![&nodes[1]], max_can_send, max_can_send);
 }
@@ -1599,29 +1612,29 @@ fn test_fee_spike_violation_fails_htlc() {
 	let feerate_per_kw = get_feerate!(nodes[0], chan.2);

+	const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
+
 	// Get the EnforcingChannelKeys for each channel, which will be used to (1) get the keys
 	// needed to sign the new commitment tx and (2) sign the new commitment tx.
-	let (local_revocation_basepoint, local_htlc_basepoint, local_payment_point, local_chan_commitment_seed) = {
+	let (local_revocation_basepoint, local_htlc_basepoint, local_payment_point, local_secret, local_secret2) = {
 		let chan_lock = nodes[0].node.channel_state.lock().unwrap();
 		let local_chan = chan_lock.by_id.get(&chan.2).unwrap();
 		let chan_keys = local_chan.get_local_keys();
 		let pubkeys = chan_keys.pubkeys();
-		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, pubkeys.payment_point, *chan_keys.commitment_seed())
+		(pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, pubkeys.payment_point,
+		 chan_keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER), chan_keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2))
 	};
-	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_payment_point, remote_chan_commitment_seed) = {
+	let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_payment_point, remote_secret1) = {
 		let chan_lock = nodes[1].node.channel_state.lock().unwrap();
 		let remote_chan = chan_lock.by_id.get(&chan.2).unwrap();
 		let chan_keys = remote_chan.get_local_keys();
 		let pubkeys = chan_keys.pubkeys();
-		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, pubkeys.payment_point, *chan_keys.commitment_seed())
+		(pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, pubkeys.payment_point,
+		 chan_keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1))
 	};

 	// Assemble the set of keys we can use for signatures for our commitment_signed message.
-	const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
-	let commitment_secret = {
-		let res = chan_utils::build_commitment_secret(&remote_chan_commitment_seed, INITIAL_COMMITMENT_NUMBER - 1);
-		SecretKey::from_slice(&res).unwrap()
-	};
+	let commitment_secret = SecretKey::from_slice(&remote_secret1).unwrap();
 	let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &commitment_secret);
 	let commit_tx_keys = chan_utils::TxCreationKeys::new(&secp_ctx, &per_commitment_point, &remote_delayed_payment_basepoint, &remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap();
@@ -1691,8 +1704,8 @@ fn test_fee_spike_violation_fails_htlc() {
 		let local_chan_lock = nodes[0].node.channel_state.lock().unwrap();
 		let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap();
 		let local_chan_keys = local_chan.get_local_keys();
-		local_chan_keys.sign_remote_commitment(feerate_per_kw, &commit_tx, &commit_tx_keys, &[&accepted_htlc_info],
-			BREAKDOWN_TIMEOUT, &secp_ctx).unwrap()
+		let pre_commit_tx_keys = PreCalculatedTxCreationKeys::new(commit_tx_keys);
+		local_chan_keys.sign_remote_commitment(feerate_per_kw, &commit_tx, &pre_commit_tx_keys, &[&accepted_htlc_info], &secp_ctx).unwrap()
 	};

 	let commit_signed_msg = msgs::CommitmentSigned {
@@ -1706,8 +1719,8 @@ fn test_fee_spike_violation_fails_htlc() {
 	let _ = nodes[1].node.get_and_clear_pending_msg_events();

 	// Send the RAA to nodes[1].
- let per_commitment_secret = chan_utils::build_commitment_secret(&local_chan_commitment_seed, INITIAL_COMMITMENT_NUMBER); - let next_secret = SecretKey::from_slice(&chan_utils::build_commitment_secret(&local_chan_commitment_seed, INITIAL_COMMITMENT_NUMBER - 2)).unwrap(); + let per_commitment_secret = local_secret; + let next_secret = SecretKey::from_slice(&local_secret2).unwrap(); let next_per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &next_secret); let raa_msg = msgs::RevokeAndACK{ channel_id: chan.2, per_commitment_secret, next_per_commitment_point}; nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg); @@ -1751,7 +1764,7 @@ fn test_chan_reserve_violation_outbound_htlc_inbound_chan() { }; let (route, our_payment_hash, _) = get_route_and_payment_hash!(1000); - unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err }, + unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err }, assert_eq!(err, "Cannot send value that would put them under remote channel reserve value")); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put them under remote channel reserve value".to_string(), 1); @@ -1929,10 +1942,10 @@ fn test_channel_reserve_holding_cell_htlcs() { { let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1); assert!(route.paths[0].iter().rev().skip(1).all(|h| h.fee_msat == feemsat)); - unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err }, - assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept")); + unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err }, + assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err))); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1); + nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1); } // channel reserve is bigger than their_max_htlc_value_in_flight_msat so loop to deplete @@ -1993,8 +2006,8 @@ fn test_channel_reserve_holding_cell_htlcs() { let recv_value_2 = stat01.value_to_self_msat - amt_msat_1 - stat01.channel_reserve_msat - total_fee_msat - commit_tx_fee_2_htlcs; { let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_2 + 1); - unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err }, - assert_eq!(err, "Cannot send value that would put us under local channel reserve value")); + unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err }, + assert!(regex::Regex::new(r"Cannot send value that would put us under local channel reserve value \(\d+\)").unwrap().is_match(err))); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -2019,10 +2032,10 @@ fn 
@@ -2019,10 +2032,10 @@ fn test_channel_reserve_holding_cell_htlcs() {
 	// test with outbound holding cell amount > 0
 	{
 		let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_22+1);
-		unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err },
-			assert_eq!(err, "Cannot send value that would put us under local channel reserve value"));
+		unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
+			assert!(regex::Regex::new(r"Cannot send value that would put us under local channel reserve value \(\d+\)").unwrap().is_match(err)));
 		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-		nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us under local channel reserve value".to_string(), 2);
+		nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us under local channel reserve value".to_string(), 2);
 	}

 	let (route_22, our_payment_hash_22, our_payment_preimage_22) = get_route_and_payment_hash!(recv_value_22);
@@ -2105,16 +2118,16 @@ fn test_channel_reserve_holding_cell_htlcs() {
 	let err = nodes[0].node.send_payment(&route, our_payment_hash, &None).err().unwrap();
 	match err {
 		PaymentSendFailure::AllFailedRetrySafe(ref fails) => {
-			match fails[0] {
-				APIError::ChannelUnavailable{err} =>
-					assert_eq!(err, "Cannot send value that would put us under local channel reserve value"),
+			match &fails[0] {
+				&APIError::ChannelUnavailable{ref err} =>
+					assert!(regex::Regex::new(r"Cannot send value that would put us under local channel reserve value \(\d+\)").unwrap().is_match(err)),
 				_ => panic!("Unexpected error variant"),
 			}
 		},
 		_ => panic!("Unexpected error variant"),
 	}
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us under local channel reserve value".to_string(), 3);
+	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us under local channel reserve value".to_string(), 3);

 	send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3, recv_value_3);
@@ -6055,6 +6068,7 @@ impl msgs::ChannelUpdate {
 			flags: 0,
 			cltv_expiry_delta: 0,
 			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Absent,
 			fee_base_msat: 0,
 			fee_proportional_millionths: 0,
 			excess_data: vec![],
@@ -6384,6 +6398,342 @@ fn bolt2_open_channel_sending_node_checks_part2() {
 	assert!(PublicKey::from_slice(&node0_to_1_send_open_channel.delayed_payment_basepoint.serialize()).is_ok());
 }
+
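// The new holding-cell tests below budget for the "fee spike reserve" with
// expressions like 2*commit_tx_fee_msat(feerate, 1 + 1). That helper is
// assumed here to follow the BOLT 3 commitment-weight formula, roughly:
fn sketch_commit_tx_fee_msat(feerate_per_kw: u64, num_htlcs: u64) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724; // commitment tx weight with no HTLC outputs
	const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172; // marginal weight per HTLC output
	// fee in sats = weight * feerate_per_kw / 1000; then scaled to msat.
	(COMMITMENT_TX_BASE_WEIGHT + num_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw / 1000 * 1000
}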
+// Test that if we fail to send an HTLC that is being freed from the holding cell, and the HTLC
+// originated from our node, its failure is surfaced to the user. We trigger this failure to
+// free the HTLC by increasing our fee while the HTLC is in the holding cell such that the HTLC
+// is no longer affordable once it's freed.
+#[test]
+fn test_fail_holding_cell_htlc_upon_free() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
+	let logger = test_utils::TestLogger::new();
+
+	// First nodes[0] generates an update_fee, setting the channel's
+	// pending_update_fee.
+	nodes[0].node.update_fee(chan.2, get_feerate!(nodes[0], chan.2) + 20).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let (update_msg, commitment_signed) = match events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+			(update_fee.as_ref(), commitment_signed)
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
+
+	let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2);
+	let channel_reserve = chan_stat.channel_reserve_msat;
+	let feerate = get_feerate!(nodes[0], chan.2);
+
+	// 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
+	let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+	let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1);
+	let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+	let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], max_can_send, TEST_FINAL_CLTV, &logger).unwrap();
+
+	// Send a payment which passes reserve checks but gets stuck in the holding cell.
+	nodes[0].node.send_payment(&route, our_payment_hash, &None).unwrap();
+	chan_stat = get_channel_value_stat!(nodes[0], chan.2);
+	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
+
+	// Flush the pending fee update.
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed);
+	let (as_revoke_and_ack, _) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	check_added_monitors!(nodes[1], 1);
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &as_revoke_and_ack);
+	check_added_monitors!(nodes[0], 1);
+
+	// Upon receipt of the RAA, there will be an attempt to resend the holding cell
+	// HTLC, but now that the fee has been raised the payment will now fail, causing
+	// us to surface its failure to the user.
+	chan_stat = get_channel_value_stat!(nodes[0], chan.2);
+	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0);
+	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), "Freeing holding cell with 1 HTLC updates".to_string(), 1);
+	let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put us under local channel reserve value ({})", log_bytes!(our_payment_hash.0), chan_stat.channel_reserve_msat);
+	nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1);
+
+	// Check that the payment failed to be sent out.
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match &events[0] {
+		&Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref error_code, ref error_data } => {
+			assert_eq!(our_payment_hash.clone(), *payment_hash);
+			assert_eq!(*rejected_by_dest, false);
+			assert_eq!(*error_code, None);
+			assert_eq!(*error_data, None);
+		},
+		_ => panic!("Unexpected event"),
+	}
+}
+
+// Test that if multiple HTLCs are released from the holding cell and one is
+// valid but the other is no longer valid upon release, the valid HTLC can be
+// successfully completed while the other one fails as expected.
+#[test]
+fn test_free_and_fail_holding_cell_htlcs() {
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
+	let logger = test_utils::TestLogger::new();
+
+	// First nodes[0] generates an update_fee, setting the channel's
+	// pending_update_fee.
+	nodes[0].node.update_fee(chan.2, get_feerate!(nodes[0], chan.2) + 200).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let (update_msg, commitment_signed) = match events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+			(update_fee.as_ref(), commitment_signed)
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msg.unwrap());
+
+	let mut chan_stat = get_channel_value_stat!(nodes[0], chan.2);
+	let channel_reserve = chan_stat.channel_reserve_msat;
+	let feerate = get_feerate!(nodes[0], chan.2);
+
+	// 2* and +1 HTLCs on the commit tx fee calculation for the fee spike reserve.
+	let (payment_preimage_1, payment_hash_1) = get_payment_preimage_hash!(nodes[0]);
+	let amt_1 = 20000;
+	let (_, payment_hash_2) = get_payment_preimage_hash!(nodes[0]);
+	let amt_2 = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 2 + 1) - amt_1;
+	let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+	let route_1 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], amt_1, TEST_FINAL_CLTV, &logger).unwrap();
+	let route_2 = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], amt_2, TEST_FINAL_CLTV, &logger).unwrap();
+
+	// Send 2 payments which pass reserve checks but get stuck in the holding cell.
+	nodes[0].node.send_payment(&route_1, payment_hash_1, &None).unwrap();
+	chan_stat = get_channel_value_stat!(nodes[0], chan.2);
+	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1);
+	nodes[0].node.send_payment(&route_2, payment_hash_2, &None).unwrap();
+	chan_stat = get_channel_value_stat!(nodes[0], chan.2);
+	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, amt_1 + amt_2);
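	// A worked instance of the amount arithmetic above, with illustrative
	// numbers (assuming a 1_000_000-msat reserve and feerate 253 sat/kW; the
	// real values depend on the config and the pending fee update):
	//   commit_tx_fee_msat(253, 2 + 1) = (724 + 3*172) * 253 / 1000 * 1000 = 313_000 msat
	//   amt_2 = 5_000_000 - 1_000_000 - 2*313_000 - 20_000 = 3_354_000 msat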
+ nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), commitment_signed); + let (revoke_and_ack, commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + check_added_monitors!(nodes[1], 1); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &revoke_and_ack); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed); + check_added_monitors!(nodes[0], 2); + + // Upon receipt of the RAA, there will be an attempt to resend the holding cell HTLCs, + // but now that the fee has been raised the second payment will now fail, causing us + // to surface its failure to the user. The first payment should succeed. + chan_stat = get_channel_value_stat!(nodes[0], chan.2); + assert_eq!(chan_stat.holding_cell_outbound_amount_msat, 0); + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), "Freeing holding cell with 2 HTLC updates".to_string(), 1); + let failure_log = format!("Failed to send HTLC with payment_hash {} due to Cannot send value that would put us under local channel reserve value ({})", log_bytes!(payment_hash_2.0), chan_stat.channel_reserve_msat); + nodes[0].logger.assert_log("lightning::ln::channel".to_string(), failure_log.to_string(), 1); + + // Check that the second payment failed to be sent out. + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match &events[0] { + &Event::PaymentFailed { ref payment_hash, ref rejected_by_dest, ref error_code, ref error_data } => { + assert_eq!(payment_hash_2.clone(), *payment_hash); + assert_eq!(*rejected_by_dest, false); + assert_eq!(*error_code, None); + assert_eq!(*error_data, None); + }, + _ => panic!("Unexpected event"), + } + + // Complete the first payment and the RAA from the fee update. + let (payment_event, send_raa_event) = { + let mut msgs = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msgs.len(), 2); + (SendEvent::from_event(msgs.remove(0)), msgs.remove(0)) + }; + let raa = match send_raa_event { + MessageSendEvent::SendRevokeAndACK { msg, .. } => msg, + _ => panic!("Unexpected event"), + }; + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa); + check_added_monitors!(nodes[1], 1); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PendingHTLCsForwardable { .. } => {}, + _ => panic!("Unexpected event"), + } + nodes[1].node.process_pending_htlc_forwards(); + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + match events[0] { + Event::PaymentReceived { .. 
+		Event::PaymentReceived { .. } => {},
+		_ => panic!("Unexpected event"),
+	}
+	nodes[1].node.claim_funds(payment_preimage_1, &None, amt_1);
+	check_added_monitors!(nodes[1], 1);
+	let update_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_msgs.update_fulfill_htlcs[0]);
+	commitment_signed_dance!(nodes[0], nodes[1], update_msgs.commitment_signed, false, true);
+	let events = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), 1);
+	match events[0] {
+		Event::PaymentSent { ref payment_preimage } => {
+			assert_eq!(*payment_preimage, payment_preimage_1);
+		}
+		_ => panic!("Unexpected event"),
+	}
+}
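// commitment_signed_dance!, used heavily above, compresses the standard
// commitment_signed/revoke_and_ack round-trip. Expanded by hand it looks
// roughly like the following (a sketch only; the real macro also checks
// monitor updates and handles the optional fail-backwards case):
//
//	node_b.node.handle_commitment_signed(&node_a_id, &cs);
//	let (raa, cs_resp) = get_revoke_commit_msgs!(node_b, node_a_id);
//	node_a.node.handle_revoke_and_ack(&node_b_id, &raa);
//	node_a.node.handle_commitment_signed(&node_b_id, &cs_resp);
//	let raa_resp = get_event_msg!(node_a, MessageSendEvent::SendRevokeAndACK, node_b_id);
//	node_b.node.handle_revoke_and_ack(&node_a_id, &raa_resp);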
+
+// Test that if we fail to forward an HTLC that is being freed from the holding cell that the
+// HTLC is failed backwards. We trigger this failure to forward the freed HTLC by increasing
+// our fee while the HTLC is in the holding cell such that the HTLC is no longer affordable
+// once it's freed.
+#[test]
+fn test_fail_holding_cell_htlc_upon_free_multihop() {
+	let chanmon_cfgs = create_chanmon_cfgs(3);
+	let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+	let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+	let chan_0_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
+	let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
+	let logger = test_utils::TestLogger::new();
+
+	// First nodes[1] generates an update_fee, setting the channel's
+	// pending_update_fee.
+	nodes[1].node.update_fee(chan_1_2.2, get_feerate!(nodes[1], chan_1_2.2) + 20).unwrap();
+	check_added_monitors!(nodes[1], 1);
+
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	let (update_msg, commitment_signed) = match events[0] {
+		MessageSendEvent::UpdateHTLCs { updates: msgs::CommitmentUpdate { ref update_fee, ref commitment_signed, .. }, .. } => {
+			(update_fee.as_ref(), commitment_signed)
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	nodes[2].node.handle_update_fee(&nodes[1].node.get_our_node_id(), update_msg.unwrap());
+
+	let mut chan_stat = get_channel_value_stat!(nodes[0], chan_0_1.2);
+	let channel_reserve = chan_stat.channel_reserve_msat;
+	let feerate = get_feerate!(nodes[0], chan_0_1.2);
+
+	// Send a payment which passes reserve checks but gets stuck in the holding cell.
+	let feemsat = 239;
+	let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
+	let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
+	let max_can_send = 5000000 - channel_reserve - 2*commit_tx_fee_msat(feerate, 1 + 1) - total_routing_fee_msat;
+	let payment_event = {
+		let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
+		let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2].node.get_our_node_id(), None, &[], max_can_send, TEST_FINAL_CLTV, &logger).unwrap();
+		nodes[0].node.send_payment(&route, our_payment_hash, &None).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+
+		SendEvent::from_event(events.remove(0))
+	};
+	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+	check_added_monitors!(nodes[1], 0);
+	commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+	expect_pending_htlcs_forwardable!(nodes[1]);
+
+	chan_stat = get_channel_value_stat!(nodes[1], chan_1_2.2);
+	assert_eq!(chan_stat.holding_cell_outbound_amount_msat, max_can_send);
+
+	// Flush the pending fee update.
+	nodes[2].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
+	let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	check_added_monitors!(nodes[2], 1);
+	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &raa);
+	nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &commitment_signed);
+	check_added_monitors!(nodes[1], 2);
+
+	// A final RAA message is generated to finalize the fee update.
+	let events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+
+	let raa_msg = match &events[0] {
+		&MessageSendEvent::SendRevokeAndACK { ref msg, .. } => {
+			msg.clone()
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	nodes[2].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa_msg);
+	check_added_monitors!(nodes[2], 1);
+	assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
+	// nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
+	let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
+	assert_eq!(process_htlc_forwards_event.len(), 1);
+	match &process_htlc_forwards_event[0] {
+		&Event::PendingHTLCsForwardable { .. } => {},
+		_ => panic!("Unexpected event"),
+	}
+
+	// In response, we call ChannelManager's process_pending_htlc_forwards
+	nodes[1].node.process_pending_htlc_forwards();
+	check_added_monitors!(nodes[1], 1);
+
+	// This causes the HTLC to be failed backwards.
+	let fail_event = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(fail_event.len(), 1);
+	let (fail_msg, commitment_signed) = match &fail_event[0] {
+		&MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
+			assert_eq!(updates.update_add_htlcs.len(), 0);
+			assert_eq!(updates.update_fulfill_htlcs.len(), 0);
+			assert_eq!(updates.update_fail_malformed_htlcs.len(), 0);
+			assert_eq!(updates.update_fail_htlcs.len(), 1);
+			(updates.update_fail_htlcs[0].clone(), updates.commitment_signed.clone())
+		},
+		_ => panic!("Unexpected event"),
+	};
+
+	// Pass the failure messages back to nodes[0].
+	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_msg);
+	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed);
+
+	// Complete the HTLC failure+removal process.
+	let (raa, commitment_signed) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	check_added_monitors!(nodes[0], 1);
+	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa);
+	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commitment_signed);
+	check_added_monitors!(nodes[1], 2);
+	let final_raa_event = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(final_raa_event.len(), 1);
+	let raa = match &final_raa_event[0] {
+		&MessageSendEvent::SendRevokeAndACK { ref msg, .. } => msg.clone(),
+		_ => panic!("Unexpected event"),
+	};
+	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &raa);
+	let fail_msg_event = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(fail_msg_event.len(), 1);
+	match &fail_msg_event[0] {
+		&MessageSendEvent::PaymentFailureNetworkUpdate { .. } => {},
+		_ => panic!("Unexpected event"),
+	}
+	let failure_event = nodes[0].node.get_and_clear_pending_events();
+	assert_eq!(failure_event.len(), 1);
+	match &failure_event[0] {
+		&Event::PaymentFailed { rejected_by_dest, .. } => {
+			assert!(!rejected_by_dest);
+		},
+		_ => panic!("Unexpected event"),
+	}
+	check_added_monitors!(nodes[0], 1);
+}
+
 // BOLT 2 Requirements for the Sender when constructing and sending an update_add_htlc message.
 // BOLT 2 Requirement: MUST NOT offer amount_msat it cannot pay for in the remote commitment transaction at the current feerate_per_kw (see "Updating Fees") while maintaining its channel reserve.
 //TODO: I don't believe this is explicitly enforced when sending an HTLC but as the Fee aspect of the BOLT specs is in flux leaving this as a TODO.
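// The three holding-cell tests above all assert a locally-surfaced failure
// the same way. A hypothetical helper capturing that shape (assuming the
// Event and PaymentHash imports this file already has; these tests do not
// actually define such a helper):
fn sketch_assert_failed_locally(events: &[Event], expected_hash: &PaymentHash) {
	assert_eq!(events.len(), 1);
	match &events[0] {
		&Event::PaymentFailed { ref payment_hash, rejected_by_dest, .. } => {
			assert_eq!(*payment_hash, *expected_hash);
			// The HTLC never left our node, so the destination did not reject it.
			assert!(!rejected_by_dest);
		},
		_ => panic!("Unexpected event"),
	}
}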
@@ -6403,10 +6753,10 @@ fn test_update_add_htlc_bolt2_sender_value_below_minimum_msat() {
 	let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
 	route.paths[0][0].fee_msat = 100;

-	unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err },
-		assert_eq!(err, "Cannot send less than their minimum HTLC value"));
+	unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
+		assert!(regex::Regex::new(r"Cannot send less than their minimum HTLC value \(\d+\)").unwrap().is_match(err)));
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send less than their minimum HTLC value".to_string(), 1);
+	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send less than their minimum HTLC value".to_string(), 1);
 }

 #[test]
@@ -6423,11 +6773,11 @@ fn test_update_add_htlc_bolt2_sender_zero_value_msat() {
 	let logger = test_utils::TestLogger::new();
 	let mut route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
 	route.paths[0][0].fee_msat = 0;
-	unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err },
+	unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
 		assert_eq!(err, "Cannot send 0-msat HTLC"));

 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send 0-msat HTLC".to_string(), 1);
+	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send 0-msat HTLC".to_string(), 1);
 }

 #[test]
@@ -6469,8 +6819,8 @@ fn test_update_add_htlc_bolt2_sender_cltv_expiry_too_high() {
 	let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
 	let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], 100000000, 500000001, &logger).unwrap();
-	unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::RouteError { err },
-		assert_eq!(err, "Channel CLTV overflowed?!"));
+	unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::RouteError { ref err },
+		assert_eq!(err, &"Channel CLTV overflowed?"));
 }

 #[test]
@@ -6513,11 +6863,11 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_num_and_htlc_id_increment() {
 	let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
 	let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
 	let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], 100000, TEST_FINAL_CLTV, &logger).unwrap();
-	unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err },
-		assert_eq!(err, "Cannot push more than their max accepted HTLCs"));
+	unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
assert!(regex::Regex::new(r"Cannot push more than their max accepted HTLCs \(\d+\)").unwrap().is_match(err))); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1); + nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot push more than their max accepted HTLCs".to_string(), 1); } #[test] @@ -6537,11 +6887,11 @@ fn test_update_add_htlc_bolt2_sender_exceed_max_htlc_value_in_flight() { let net_graph_msg_handler = &nodes[0].net_graph_msg_handler; let logger = test_utils::TestLogger::new(); let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], max_in_flight+1, TEST_FINAL_CLTV, &logger).unwrap(); - unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { err }, - assert_eq!(err, "Cannot send value that would put us over the max HTLC value in flight our peer will accept")); + unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err }, + assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err))); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1); + nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put us over the max HTLC value in flight our peer will accept".to_string(), 1); send_payment(&nodes[0], &[&nodes[1]], max_in_flight, max_in_flight); } @@ -6573,7 +6923,7 @@ fn test_update_add_htlc_bolt2_receiver_check_amount_received_more_than_min() { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert_eq!(err_msg.data, "Remote side tried to send less than our minimum HTLC value"); + assert!(regex::Regex::new(r"Remote side tried to send less than our minimum HTLC value\. Lower limit: \(\d+\)\. 
Actual: \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); } @@ -6653,7 +7003,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_htlc_limit() { assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert_eq!(err_msg.data, "Remote tried to push more than our max accepted HTLCs"); + assert!(regex::Regex::new(r"Remote tried to push more than our max accepted HTLCs \(\d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); } @@ -6678,7 +7028,7 @@ fn test_update_add_htlc_bolt2_receiver_check_max_in_flight_msat() { assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert_eq!(err_msg.data,"Remote HTLC add would put them over our max HTLC value"); + assert!(regex::Regex::new("Remote HTLC add would put them over our max HTLC value").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); } @@ -6752,7 +7102,7 @@ fn test_update_add_htlc_bolt2_receiver_check_repeated_id_ignore() { assert!(nodes[1].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); - assert_eq!(err_msg.data, "Remote skipped HTLC ID"); + assert!(regex::Regex::new(r"Remote skipped HTLC ID \(skipped ID: \d+\)").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[1], 1); } @@ -6785,7 +7135,7 @@ fn test_update_fulfill_htlc_bolt2_update_fulfill_htlc_before_commitment() { assert!(nodes[0].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed"); + assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); } @@ -6818,7 +7168,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_htlc_before_commitment() { assert!(nodes[0].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed"); + assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); } @@ -6852,7 +7202,7 @@ fn test_update_fulfill_htlc_bolt2_update_fail_malformed_htlc_before_commitment() assert!(nodes[0].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert_eq!(err_msg.data, "Remote tried to fulfill/fail HTLC before it had been committed"); + assert!(regex::Regex::new(r"Remote tried to fulfill/fail HTLC \(\d+\) before it had been committed").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); } @@ -6934,7 +7284,7 @@ fn test_update_fulfill_htlc_bolt2_wrong_preimage() { assert!(nodes[0].node.list_channels().is_empty()); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); - assert_eq!(err_msg.data, "Remote tried to fulfill HTLC with an incorrect preimage"); + assert!(regex::Regex::new(r"Remote tried to fulfill HTLC \(\d+\) with an incorrect preimage").unwrap().is_match(err_msg.data.as_str())); check_added_monitors!(nodes[0], 1); } @@ -7328,7 +7678,7 @@ fn test_upfront_shutdown_script() { node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh(); // Test we enforce 
 	// Test we enforce upfront_scriptpbukey if by providing a diffrent one at closing that we disconnect peer
 	nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
-	assert_eq!(check_closed_broadcast!(nodes[2], true).unwrap().data, "Got shutdown request with a scriptpubkey which did not match their previous scriptpubkey");
+	assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str()));
 	check_added_monitors!(nodes[2], 1);

 	// We test that in case of peer committing upfront to a script, if it doesn't change at closing, we sign
@@ -7409,7 +7759,7 @@ fn test_user_configurable_csv_delay() {
 	let keys_manager: Arc<KeysInterface<ChanKeySigner = EnforcingChannelKeys>> = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet));
 	if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, &low_our_to_self_config) {
 		match error {
-			APIError::APIMisuseError { err } => { assert_eq!(err, "Configured with an unreasonable our_to_self_delay putting user funds at risks"); },
+			APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
 			_ => panic!("Unexpected event"),
 		}
 	} else { assert!(false) }
@@ -7420,7 +7770,7 @@ fn test_user_configurable_csv_delay() {
 	open_channel.to_self_delay = 200;
 	if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &low_our_to_self_config) {
 		match error {
-			ChannelError::Close(err) => { assert_eq!(err, "Configured with an unreasonable our_to_self_delay putting user funds at risks"); },
+			ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
 			_ => panic!("Unexpected event"),
 		}
 	} else { assert!(false); }
@@ -7434,7 +7784,7 @@ fn test_user_configurable_csv_delay() {
 	if let MessageSendEvent::HandleError { ref action, .. } = nodes[0].node.get_and_clear_pending_msg_events()[0] {
 		match action {
 			&ErrorAction::SendErrorMessage { ref msg } => {
-				assert_eq!(msg.data,"They wanted our payments to be delayed by a needlessly long period");
+				assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(msg.data.as_str()));
 			},
 			_ => { assert!(false); }
 		}
 	} else { assert!(false); }
@@ -7446,7 +7796,7 @@ fn test_user_configurable_csv_delay() {
 	open_channel.to_self_delay = 200;
 	if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &high_their_to_self_config) {
 		match error {
-			ChannelError::Close(err) => { assert_eq!(err, "They wanted our payments to be delayed by a needlessly long period"); },
Actual: \d+").unwrap().is_match(err.as_str())); }, _ => panic!("Unexpected event"), } } else { assert!(false); } @@ -8125,11 +8475,12 @@ fn test_counterparty_raa_skip_no_crash() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2; - let commitment_seed = nodes[0].node.channel_state.lock().unwrap().by_id.get_mut(&channel_id).unwrap().local_keys.commitment_seed().clone(); + let mut guard = nodes[0].node.channel_state.lock().unwrap(); + let local_keys = &guard.by_id.get_mut(&channel_id).unwrap().local_keys; const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1; let next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(), - &SecretKey::from_slice(&chan_utils::build_commitment_secret(&commitment_seed, INITIAL_COMMITMENT_NUMBER - 2)).unwrap()); - let per_commitment_secret = chan_utils::build_commitment_secret(&commitment_seed, INITIAL_COMMITMENT_NUMBER); + &SecretKey::from_slice(&local_keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap()); + let per_commitment_secret = local_keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });