From: Matt Corallo <649246+TheBlueMatt@users.noreply.github.com>
Date: Tue, 3 May 2022 22:44:26 +0000 (+0000)
Subject: Merge pull request #1444 from ViktorTigerstrom/2022-04-use-counterparty-htlc-max...
X-Git-Tag: v0.0.107~44
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=6418c9ef0dd68e87444b690d0583e8a22cf486dc;hp=224d470d389704f9481df537e6e6d91f4909d5fd;p=rust-lightning

Merge pull request #1444 from ViktorTigerstrom/2022-04-use-counterparty-htlc-max-for-chan-updates

Set `ChannelUpdate` `htlc_maximum_msat` using the peer's value
---
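The change named in the subject line does not itself appear in the hunks below (the diff is taken against the other parent of the merge). As a rough, purely illustrative sketch of the idea in the title (advertise the counterparty's htlc_maximum_msat in our ChannelUpdate, capped to what the channel can carry), with hypothetical names that are not part of this patch or the LDK API:

    // Illustrative only: the function and parameter names are hypothetical and
    // do not come from this patch or from LDK.
    fn htlc_maximum_for_channel_update(counterparty_max_htlc_msat: u64, channel_capacity_msat: u64) -> u64 {
        // Advertise the peer's own limit, clamped to the channel capacity.
        core::cmp::min(counterparty_max_htlc_msat, channel_capacity_msat)
    }
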
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 5990b2ad..a28d6347 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -159,20 +159,26 @@ pub(crate) struct HTLCPreviousHopData {
 }
 
 enum OnionPayload {
-	/// Contains a total_msat (which may differ from value if this is a Multi-Path Payment) and a
-	/// payment_secret which prevents path-probing attacks and can associate different HTLCs which
-	/// are part of the same payment.
-	Invoice(msgs::FinalOnionHopData),
+	/// Indicates this incoming onion payload is for the purpose of paying an invoice.
+	Invoice {
+		/// This is only here for backwards-compatibility in serialization, in the future it can be
+		/// removed, breaking clients running 0.0.106 and earlier.
+		_legacy_hop_data: msgs::FinalOnionHopData,
+	},
 	/// Contains the payer-provided preimage.
 	Spontaneous(PaymentPreimage),
 }
 
+/// HTLCs that are to us and can be failed/claimed by the user
 struct ClaimableHTLC {
 	prev_hop: HTLCPreviousHopData,
 	cltv_expiry: u32,
+	/// The amount (in msats) of this MPP part
 	value: u64,
 	onion_payload: OnionPayload,
 	timer_ticks: u8,
+	/// The sum total of all MPP parts
+	total_msat: u64,
 }
 
 /// A payment identifier used to uniquely identify a payment to LDK.
@@ -3096,11 +3102,13 @@ impl ChannelMana
 					HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
 							routing, incoming_shared_secret, payment_hash, amt_to_forward, .. },
 							prev_funding_outpoint } => {
-						let (cltv_expiry, onion_payload, phantom_shared_secret) = match routing {
-							PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } =>
-								(incoming_cltv_expiry, OnionPayload::Invoice(payment_data), phantom_shared_secret),
+						let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
+							PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
+								let _legacy_hop_data = payment_data.clone();
+								(incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret)
+							},
 							PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
-								(incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), None),
+								(incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), None, None),
 							_ => {
 								panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
 							}
@@ -3115,6 +3123,7 @@ impl ChannelMana
 							},
 							value: amt_to_forward,
 							timer_ticks: 0,
+							total_msat: if let Some(data) = &payment_data { data.total_msat } else { amt_to_forward },
 							cltv_expiry,
 							onion_payload,
 						};
@@ -3138,7 +3147,7 @@ impl ChannelMana
 						}
 
 						macro_rules! check_total_value {
-							($payment_data_total_msat: expr, $payment_secret: expr, $payment_preimage: expr) => {{
+							($payment_data: expr, $payment_preimage: expr) => {{
 								let mut payment_received_generated = false;
 								let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
 									.or_insert(Vec::new());
@@ -3153,10 +3162,10 @@ impl ChannelMana
 								for htlc in htlcs.iter() {
 									total_value += htlc.value;
 									match &htlc.onion_payload {
-										OnionPayload::Invoice(htlc_payment_data) => {
-											if htlc_payment_data.total_msat != $payment_data_total_msat {
+										OnionPayload::Invoice { .. } => {
+											if htlc.total_msat != $payment_data.total_msat {
 												log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
-													log_bytes!(payment_hash.0), $payment_data_total_msat, htlc_payment_data.total_msat);
+													log_bytes!(payment_hash.0), $payment_data.total_msat, htlc.total_msat);
 												total_value = msgs::MAX_VALUE_MSAT;
 											}
 											if total_value >= msgs::MAX_VALUE_MSAT { break; }
@@ -3164,17 +3173,17 @@ impl ChannelMana
 										_ => unreachable!(),
 									}
 								}
-								if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data_total_msat {
+								if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
 									log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
-										log_bytes!(payment_hash.0), total_value, $payment_data_total_msat);
+										log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
 									fail_htlc!(claimable_htlc);
-								} else if total_value == $payment_data_total_msat {
+								} else if total_value == $payment_data.total_msat {
 									htlcs.push(claimable_htlc);
 									new_events.push(events::Event::PaymentReceived {
 										payment_hash,
 										purpose: events::PaymentPurpose::InvoicePayment {
 											payment_preimage: $payment_preimage,
-											payment_secret: $payment_secret,
+											payment_secret: $payment_data.payment_secret,
 										},
 										amt: total_value,
 									});
@@ -3199,17 +3208,16 @@ impl ChannelMana
 								match payment_secrets.entry(payment_hash) {
 									hash_map::Entry::Vacant(_) => {
 										match claimable_htlc.onion_payload {
-											OnionPayload::Invoice(ref payment_data) => {
-												let payment_preimage = match inbound_payment::verify(payment_hash, payment_data.clone(), self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
+											OnionPayload::Invoice { .. } => {
+												let payment_data = payment_data.unwrap();
+												let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
 													Ok(payment_preimage) => payment_preimage,
 													Err(()) => {
 														fail_htlc!(claimable_htlc);
 														continue
 													}
 												};
-												let payment_data_total_msat = payment_data.total_msat;
-												let payment_secret = payment_data.payment_secret.clone();
-												check_total_value!(payment_data_total_msat, payment_secret, payment_preimage);
+												check_total_value!(payment_data, payment_preimage);
 											},
 											OnionPayload::Spontaneous(preimage) => {
 												match channel_state.claimable_htlcs.entry(payment_hash) {
@@ -3230,14 +3238,12 @@ impl ChannelMana
 										}
 									},
 									hash_map::Entry::Occupied(inbound_payment) => {
-										let payment_data =
-											if let OnionPayload::Invoice(ref data) = claimable_htlc.onion_payload {
-												data.clone()
-											} else {
-												log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
-												fail_htlc!(claimable_htlc);
-												continue
-											};
+										if payment_data.is_none() {
+											log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
+											fail_htlc!(claimable_htlc);
+											continue
+										};
+										let payment_data = payment_data.unwrap();
 										if inbound_payment.get().payment_secret != payment_data.payment_secret {
 											log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
 											fail_htlc!(claimable_htlc);
@@ -3246,7 +3252,7 @@ impl ChannelMana
 											log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
 											fail_htlc!(claimable_htlc);
 										} else {
-											let payment_received_generated = check_total_value!(payment_data.total_msat, payment_data.payment_secret, inbound_payment.get().payment_preimage);
+											let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
 											if payment_received_generated {
 												inbound_payment.remove_entry();
 											}
@@ -3465,10 +3471,10 @@ impl ChannelMana
 				debug_assert!(false);
 				return false;
 			}
-			if let OnionPayload::Invoice(ref final_hop_data) = htlcs[0].onion_payload {
+			if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
 				// Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
 				// In this case we're not going to handle any timeouts of the parts here.
-				if final_hop_data.total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
+				if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
 					return true;
 				} else if htlcs.into_iter().any(|htlc| {
 					htlc.timer_ticks += 1;
@@ -6069,20 +6075,21 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, {
 
 impl Writeable for ClaimableHTLC {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
 		let payment_data = match &self.onion_payload {
-			OnionPayload::Invoice(data) => Some(data.clone()),
+			OnionPayload::Invoice { _legacy_hop_data } => Some(_legacy_hop_data),
 			_ => None,
 		};
 		let keysend_preimage = match self.onion_payload {
-			OnionPayload::Invoice(_) => None,
+			OnionPayload::Invoice { .. } => None,
 			OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
 		};
-		write_tlv_fields!
-		(writer,
-		 {
-		   (0, self.prev_hop, required), (2, self.value, required),
-		   (4, payment_data, option), (6, self.cltv_expiry, required),
-		   (8, keysend_preimage, option),
-		 });
+		write_tlv_fields!(writer, {
+			(0, self.prev_hop, required),
+			(1, self.total_msat, required),
+			(2, self.value, required),
+			(4, payment_data, option),
+			(6, self.cltv_expiry, required),
+			(8, keysend_preimage, option),
+		});
 		Ok(())
 	}
@@ -6093,32 +6100,41 @@ impl Readable for ClaimableHTLC {
 		let mut value = 0;
 		let mut payment_data: Option<msgs::FinalOnionHopData> = None;
 		let mut cltv_expiry = 0;
+		let mut total_msat = None;
 		let mut keysend_preimage: Option<PaymentPreimage> = None;
-		read_tlv_fields!
-		(reader,
-		 {
-		   (0, prev_hop, required), (2, value, required),
-		   (4, payment_data, option), (6, cltv_expiry, required),
-		   (8, keysend_preimage, option)
-		 });
+		read_tlv_fields!(reader, {
+			(0, prev_hop, required),
+			(1, total_msat, option),
+			(2, value, required),
+			(4, payment_data, option),
+			(6, cltv_expiry, required),
+			(8, keysend_preimage, option)
+		});
 		let onion_payload = match keysend_preimage {
 			Some(p) => {
 				if payment_data.is_some() { return Err(DecodeError::InvalidValue) }
+				if total_msat.is_none() {
+					total_msat = Some(value);
+				}
 				OnionPayload::Spontaneous(p)
 			},
 			None => {
 				if payment_data.is_none() { return Err(DecodeError::InvalidValue) }
-				OnionPayload::Invoice(payment_data.unwrap())
+				if total_msat.is_none() {
+					total_msat = Some(payment_data.as_ref().unwrap().total_msat);
+				}
+				OnionPayload::Invoice { _legacy_hop_data: payment_data.unwrap() }
			},
 		};
 		Ok(Self {
 			prev_hop: prev_hop.0.unwrap(),
 			timer_ticks: 0,
 			value,
+			total_msat: total_msat.unwrap(),
 			onion_payload,
 			cltv_expiry,
 		})
@@ -7319,7 +7335,7 @@ mod tests {
 		// payment verification fails as expected.
 		let mut bad_payment_hash = payment_hash.clone();
 		bad_payment_hash.0[0] += 1;
-		match inbound_payment::verify(bad_payment_hash, payment_data.clone(), nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
+		match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
 			Ok(_) => panic!("Unexpected ok"),
 			Err(()) => {
 				nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment".to_string(), "Failing HTLC with user-generated payment_hash".to_string(), 1);
@@ -7327,7 +7343,7 @@ mod tests {
 		}
 
 		// Check that using the original payment hash succeeds.
-		assert!(inbound_payment::verify(payment_hash, payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
+		assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
 	}
 }
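Outside the patch itself, here is a minimal, self-contained sketch of the MPP bookkeeping rule that the check_total_value changes above enforce: every part must claim the same total, and the sum of the parts may complete the total but never exceed it. The type and function below are illustrative stand-ins, not LDK types.

    // Illustrative stand-ins, not LDK types.
    struct PartialHtlc { value_msat: u64, total_msat: u64 }

    /// Ok(Some(total)) once the parts add up exactly, Ok(None) while parts are
    /// still missing, Err(()) if the parts disagree on the total or overrun it.
    fn check_mpp_parts(parts: &[PartialHtlc], claimed_total_msat: u64) -> Result<Option<u64>, ()> {
        let mut sum = 0u64;
        for part in parts {
            if part.total_msat != claimed_total_msat { return Err(()); } // inconsistent totals
            sum = sum.saturating_add(part.value_msat);
        }
        if sum > claimed_total_msat { return Err(()); } // the parts overran the payment
        if sum == claimed_total_msat { Ok(Some(sum)) } else { Ok(None) }
    }
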
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 9bbd6c0e..163c6cbe 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -1167,6 +1167,21 @@ macro_rules! get_payment_preimage_hash {
 	}
 }
 
+#[macro_export]
+macro_rules! get_route {
+	($send_node: expr, $payment_params: expr, $recv_value: expr, $cltv: expr) => {{
+		use $crate::chain::keysinterface::KeysInterface;
+		let scorer = $crate::util::test_utils::TestScorer::with_penalty(0);
+		let keys_manager = $crate::util::test_utils::TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet);
+		let random_seed_bytes = keys_manager.get_secure_random_bytes();
+		$crate::routing::router::get_route(
+			&$send_node.node.get_our_node_id(), &$payment_params, &$send_node.network_graph.read_only(),
+			Some(&$send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
+			$recv_value, $cltv, $send_node.logger, &scorer, &random_seed_bytes
+		)
+	}}
+}
+
 #[cfg(test)]
 #[macro_export]
 macro_rules! get_route_and_payment_hash {
@@ -1176,17 +1191,9 @@ macro_rules! get_route_and_payment_hash {
 		$crate::get_route_and_payment_hash!($send_node, $recv_node, payment_params, $recv_value, TEST_FINAL_CLTV)
 	}};
 	($send_node: expr, $recv_node: expr, $payment_params: expr, $recv_value: expr, $cltv: expr) => {{
-		use $crate::chain::keysinterface::KeysInterface;
 		let (payment_preimage, payment_hash, payment_secret) = $crate::get_payment_preimage_hash!($recv_node, Some($recv_value));
-		let scorer = $crate::util::test_utils::TestScorer::with_penalty(0);
-		let keys_manager = $crate::util::test_utils::TestKeysInterface::new(&[0u8; 32], bitcoin::network::constants::Network::Testnet);
-		let random_seed_bytes = keys_manager.get_secure_random_bytes();
-		let route = $crate::routing::router::get_route(
-			&$send_node.node.get_our_node_id(), &$payment_params, &$send_node.network_graph.read_only(),
-			Some(&$send_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
-			$recv_value, $cltv, $send_node.logger, &scorer, &random_seed_bytes
-		).unwrap();
-		(route, payment_hash, payment_preimage, payment_secret)
+		let route = $crate::get_route!($send_node, $payment_params, $recv_value, $cltv);
+		(route.unwrap(), payment_hash, payment_preimage, payment_secret)
 	}}
 }
 
@@ -1650,15 +1657,7 @@ pub const TEST_FINAL_CLTV: u32 = 70;
 pub fn route_payment<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&Node<'a, 'b, 'c>], recv_value: u64) -> (PaymentPreimage, PaymentHash, PaymentSecret) {
 	let payment_params = PaymentParameters::from_node_id(expected_route.last().unwrap().node.get_our_node_id())
 		.with_features(InvoiceFeatures::known());
-	let network_graph = origin_node.network_graph.read_only();
-	let scorer = test_utils::TestScorer::with_penalty(0);
-	let seed = [0u8; 32];
-	let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
-	let random_seed_bytes = keys_manager.get_secure_random_bytes();
-	let route = get_route(
-		&origin_node.node.get_our_node_id(), &payment_params, &network_graph,
-		Some(&origin_node.node.list_usable_channels().iter().collect::<Vec<_>>()),
-		recv_value, TEST_FINAL_CLTV, origin_node.logger, &scorer, &random_seed_bytes).unwrap();
+	let route = get_route!(origin_node, payment_params, recv_value, TEST_FINAL_CLTV).unwrap();
 	assert_eq!(route.paths.len(), 1);
 	assert_eq!(route.paths[0].len(), expected_route.len());
 	for (node, hop) in expected_route.iter().zip(route.paths[0].iter()) {
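A hypothetical call site for the new get_route! macro, mirroring how the tests below use it (the surrounding node setup from the functional test harness is omitted):

    // Hypothetical usage inside a functional test; `nodes` comes from the usual
    // create_network(...) harness setup, which is omitted here.
    let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id())
        .with_features(InvoiceFeatures::known());
    let route = get_route!(nodes[0], payment_params, 100_000, TEST_FINAL_CLTV).unwrap();
    assert_eq!(route.paths.len(), 1);
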
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index d73cafda..73c3a86d 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -9509,12 +9509,7 @@ fn test_forwardable_regen() {
 	claim_payment(&nodes[0], &[&nodes[1], &nodes[2]], payment_preimage_2);
 }
 
-#[test]
-fn test_dup_htlc_second_fail_panic() {
-	// Previously, if we received two HTLCs back-to-back, where the second overran the expected
-	// value for the payment, we'd fail back both HTLCs after generating a `PaymentReceived` event.
-	// Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
-	// HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
+fn do_test_dup_htlc_second_rejected(test_for_second_fail_panic: bool) {
 	let chanmon_cfgs = create_chanmon_cfgs(2);
 	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
@@ -9524,14 +9519,9 @@ fn test_dup_htlc_second_fail_panic() {
 
 	let payment_params = PaymentParameters::from_node_id(nodes[1].node.get_our_node_id())
 		.with_features(InvoiceFeatures::known());
-	let scorer = test_utils::TestScorer::with_penalty(0);
-	let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
-	let route = get_route(
-		&nodes[0].node.get_our_node_id(), &payment_params, &nodes[0].network_graph.read_only(),
-		Some(&nodes[0].node.list_usable_channels().iter().collect::<Vec<_>>()),
-		10_000, TEST_FINAL_CLTV, nodes[0].logger, &scorer, &random_seed_bytes).unwrap();
+	let route = get_route!(nodes[0], payment_params, 10_000, TEST_FINAL_CLTV).unwrap();
 
-	let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
+	let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[1]);
 
 	{
 		nodes[0].node.send_payment(&route, our_payment_hash, &Some(our_payment_secret)).unwrap();
@@ -9559,26 +9549,153 @@ fn test_dup_htlc_second_fail_panic() {
 		// the first HTLC delivered above.
 	}
 
-	// Now we go fail back the first HTLC from the user end.
 	expect_pending_htlcs_forwardable_ignore!(nodes[1]);
 	nodes[1].node.process_pending_htlc_forwards();
-	nodes[1].node.fail_htlc_backwards(&our_payment_hash);
-	expect_pending_htlcs_forwardable_ignore!(nodes[1]);
-	nodes[1].node.process_pending_htlc_forwards();
+	if test_for_second_fail_panic {
+		// Now we go fail back the first HTLC from the user end.
+		nodes[1].node.fail_htlc_backwards(&our_payment_hash);
 
-	check_added_monitors!(nodes[1], 1);
-	let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
-	assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
+		expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+		nodes[1].node.process_pending_htlc_forwards();
+
+		check_added_monitors!(nodes[1], 1);
+		let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		assert_eq!(fail_updates_1.update_fail_htlcs.len(), 2);
+
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
+		commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
+
+		let failure_events = nodes[0].node.get_and_clear_pending_events();
+		assert_eq!(failure_events.len(), 2);
+		if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
+		if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
+	} else {
+		// Let the second HTLC fail and claim the first
+		expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+		nodes[1].node.process_pending_htlc_forwards();
+
+		check_added_monitors!(nodes[1], 1);
+		let fail_updates_1 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+		commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
+
+		expect_payment_failed_conditions!(nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
+
+		claim_payment(&nodes[0], &[&nodes[1]], our_payment_preimage);
+	}
+}
+
+#[test]
+fn test_dup_htlc_second_fail_panic() {
+	// Previously, if we received two HTLCs back-to-back, where the second overran the expected
+	// value for the payment, we'd fail back both HTLCs after generating a `PaymentReceived` event.
+	// Then, if the user failed the second payment, they'd hit a "tried to fail an already failed
+	// HTLC" debug panic. This tests for this behavior, checking that only one HTLC is auto-failed.
+	do_test_dup_htlc_second_rejected(true);
+}
+
+#[test]
+fn test_dup_htlc_second_rejected() {
+	// Test that if we receive a second HTLC for an MPP payment that overruns the payment amount we
+	// simply reject the second HTLC but are still able to claim the first HTLC.
+	do_test_dup_htlc_second_rejected(false);
+}
+
+#[test]
+fn test_inconsistent_mpp_params() {
+	// Test that if we recieve two HTLCs with different payment parameters we fail back the first
+	// such HTLC and allow the second to stay.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+	create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+	create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+
+	let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
+		.with_features(InvoiceFeatures::known());
+	let mut route = get_route!(nodes[0], payment_params, 15_000_000, TEST_FINAL_CLTV).unwrap();
+	assert_eq!(route.paths.len(), 2);
+	route.paths.sort_by(|path_a, _| {
+		// Sort the path so that the path through nodes[1] comes first
+		if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+			core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
+	});
+	let payment_params_opt = Some(payment_params);
+
+	let (our_payment_preimage, our_payment_hash, our_payment_secret) = get_payment_preimage_hash!(&nodes[3]);
+
+	let cur_height = nodes[0].best_block_info().1;
+	let payment_id = PaymentId([42; 32]);
+	{
+		nodes[0].node.send_payment_along_path(&route.paths[0], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), false, None);
+	}
+	assert!(nodes[3].node.get_and_clear_pending_events().is_empty());
+
+	{
+		nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 14_000_000, cur_height, payment_id, &None).unwrap();
+		check_added_monitors!(nodes[0], 1);
+
+		let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let payment_event = SendEvent::from_event(events.pop().unwrap());
+
+		nodes[2].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+		commitment_signed_dance!(nodes[2], nodes[0], payment_event.commitment_msg, false);
 
-	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
-	nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[1]);
-	commitment_signed_dance!(nodes[0], nodes[1], fail_updates_1.commitment_signed, false);
+		expect_pending_htlcs_forwardable!(nodes[2]);
+		check_added_monitors!(nodes[2], 1);
+
+		let mut events = nodes[2].node.get_and_clear_pending_msg_events();
+		assert_eq!(events.len(), 1);
+		let payment_event = SendEvent::from_event(events.pop().unwrap());
+
+		nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &payment_event.msgs[0]);
+		check_added_monitors!(nodes[3], 0);
+		commitment_signed_dance!(nodes[3], nodes[2], payment_event.commitment_msg, true, true);
+
+		// At this point, nodes[3] should notice the two HTLCs don't contain the same total payment
+		// amount. It will assume the second is a privacy attack (no longer particularly relevant
+		// post-payment_secrets) and fail back the new HTLC.
+	}
+	expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+	nodes[3].node.process_pending_htlc_forwards();
+	expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+	nodes[3].node.process_pending_htlc_forwards();
+
+	check_added_monitors!(nodes[3], 1);
+
+	let fail_updates_1 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
+	nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
+	commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
+
+	expect_pending_htlcs_forwardable!(nodes[2]);
+	check_added_monitors!(nodes[2], 1);
+
+	let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
+	nodes[0].node.handle_update_fail_htlc(&nodes[2].node.get_our_node_id(), &fail_updates_2.update_fail_htlcs[0]);
+	commitment_signed_dance!(nodes[0], nodes[2], fail_updates_2.commitment_signed, false);
+
+	expect_payment_failed_conditions!(nodes[0], our_payment_hash, true, PaymentFailedConditions::new().mpp_parts_remain());
+
+	nodes[0].node.send_payment_along_path(&route.paths[1], &payment_params_opt, &our_payment_hash, &Some(our_payment_secret), 15_000_000, cur_height, payment_id, &None).unwrap();
+	check_added_monitors!(nodes[0], 1);
+
+	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(events.len(), 1);
+	pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None);
 
-	let failure_events = nodes[0].node.get_and_clear_pending_events();
-	assert_eq!(failure_events.len(), 2);
-	if let Event::PaymentPathFailed { .. } = failure_events[0] {} else { panic!(); }
-	if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
+	claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage);
 }
 
 #[test]
diff --git a/lightning/src/ln/inbound_payment.rs b/lightning/src/ln/inbound_payment.rs
index 8ed77e5a..f4f114d9 100644
--- a/lightning/src/ln/inbound_payment.rs
+++ b/lightning/src/ln/inbound_payment.rs
@@ -200,7 +200,7 @@ fn construct_payment_secret(iv_bytes: &[u8; IV_LEN], metadata_bytes: &[u8; METAD
 /// [`KeysInterface::get_inbound_payment_key_material`]: crate::chain::keysinterface::KeysInterface::get_inbound_payment_key_material
 /// [`create_inbound_payment`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment
 /// [`create_inbound_payment_for_hash`]: crate::ln::channelmanager::ChannelManager::create_inbound_payment_for_hash
-pub(super) fn verify<L: Deref>(payment_hash: PaymentHash, payment_data: msgs::FinalOnionHopData, highest_seen_timestamp: u64, keys: &ExpandedKey, logger: &L) -> Result<Option<PaymentPreimage>, ()>
+pub(super) fn verify<L: Deref>(payment_hash: PaymentHash, payment_data: &msgs::FinalOnionHopData, highest_seen_timestamp: u64, keys: &ExpandedKey, logger: &L) -> Result<Option<PaymentPreimage>, ()>
 	where L::Target: Logger
 {
 	let (iv_bytes, metadata_bytes) = decrypt_metadata(payment_data.payment_secret, keys);
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index c09df175..07300fdf 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -339,6 +339,7 @@ struct Peer {
 	msgs_sent_since_pong: usize,
 	awaiting_pong_timer_tick_intervals: i8,
 	received_message_since_timer_tick: bool,
+	sent_gossip_timestamp_filter: bool,
 }
 
 impl Peer {
@@ -348,7 +349,11 @@ impl Peer {
 	/// announcements/updates for the given channel_id then we will send it when we get to that
 	/// point and we shouldn't send it yet to avoid sending duplicate updates. If we've already
 	/// sent the old versions, we should send the update, and so return true here.
-	fn should_forward_channel_announcement(&self, channel_id: u64)->bool{
+	fn should_forward_channel_announcement(&self, channel_id: u64) -> bool {
+		if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
+			!self.sent_gossip_timestamp_filter {
+			return false;
+		}
 		match self.sync_status {
 			InitSyncTracker::NoSyncRequested => true,
 			InitSyncTracker::ChannelsSyncing(i) => i < channel_id,
@@ -358,6 +363,10 @@ impl Peer {
 
 	/// Similar to the above, but for node announcements indexed by node_id.
 	fn should_forward_node_announcement(&self, node_id: PublicKey) -> bool {
+		if self.their_features.as_ref().unwrap().supports_gossip_queries() &&
+			!self.sent_gossip_timestamp_filter {
+			return false;
+		}
 		match self.sync_status {
 			InitSyncTracker::NoSyncRequested => true,
 			InitSyncTracker::ChannelsSyncing(_) => false,
@@ -619,6 +628,7 @@ impl P
 				msgs_sent_since_pong: 0,
 				awaiting_pong_timer_tick_intervals: 0,
 				received_message_since_timer_tick: false,
+				sent_gossip_timestamp_filter: false,
 			}).is_some() {
 				panic!("PeerManager driver duplicated descriptors!");
 			};
@@ -665,6 +675,7 @@ impl P
 				msgs_sent_since_pong: 0,
 				awaiting_pong_timer_tick_intervals: 0,
 				received_message_since_timer_tick: false,
+				sent_gossip_timestamp_filter: false,
 			}).is_some() {
 				panic!("PeerManager driver duplicated descriptors!");
 			};
@@ -1058,7 +1069,8 @@ impl P
 					log_info!(self.logger, "Received peer Init message from {}: {}",
 						log_pubkey!(peer.their_node_id.unwrap()), msg.features);
 
-					if msg.features.initial_routing_sync() {
+					// For peers not supporting gossip queries start sync now, otherwise wait until we receive a filter.
+					if msg.features.initial_routing_sync() && !msg.features.supports_gossip_queries() {
 						peer.sync_status = InitSyncTracker::ChannelsSyncing(0);
 					}
 					if !msg.features.supports_static_remote_key() {
@@ -1205,7 +1217,13 @@ impl P
 					self.message_handler.route_handler.handle_reply_channel_range(&peer.their_node_id.unwrap(), msg)?;
 				},
 				wire::Message::GossipTimestampFilter(_msg) => {
-					// TODO: handle message
+					// When supporting gossip messages, start inital gossip sync only after we receive
+					// a GossipTimestampFilter
+					if peer.their_features.as_ref().unwrap().supports_gossip_queries() &&
+						!peer.sent_gossip_timestamp_filter {
+						peer.sent_gossip_timestamp_filter = true;
+						peer.sync_status = InitSyncTracker::ChannelsSyncing(0);
+					}
 				},
 
 				// Unknown messages:
@@ -1803,6 +1821,8 @@ mod tests {
 		assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
 		peer_b.process_events();
 		assert_eq!(peer_a.read_event(&mut fd_a, &fd_b.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
+		peer_a.process_events();
+		assert_eq!(peer_b.read_event(&mut fd_b, &fd_a.outbound_data.lock().unwrap().split_off(0)).unwrap(), false);
 
 		(fd_a.clone(), fd_b.clone())
 	}
@@ -1866,21 +1886,21 @@ mod tests {
 		let (mut fd_a, mut fd_b) = establish_connection(&peers[0], &peers[1]);
 
 		// Make each peer to read the messages that the other peer just wrote to them. Note that
-		// due to the max-messagse-before-ping limits this may take a few iterations to complete.
+		// due to the max-message-before-ping limits this may take a few iterations to complete.
 		for _ in 0..150/super::BUFFER_DRAIN_MSGS_PER_TICK + 1 {
-			peers[0].process_events();
-			let b_read_data = fd_a.outbound_data.lock().unwrap().split_off(0);
-			assert!(!b_read_data.is_empty());
-
-			peers[1].read_event(&mut fd_b, &b_read_data).unwrap();
 			peers[1].process_events();
-
 			let a_read_data = fd_b.outbound_data.lock().unwrap().split_off(0);
 			assert!(!a_read_data.is_empty());
+
 			peers[0].read_event(&mut fd_a, &a_read_data).unwrap();
+			peers[0].process_events();
 
-			peers[1].process_events();
-			assert_eq!(fd_b.outbound_data.lock().unwrap().len(), 0, "Until B receives data, it shouldn't send more messages");
+			let b_read_data = fd_a.outbound_data.lock().unwrap().split_off(0);
+			assert!(!b_read_data.is_empty());
+			peers[1].read_event(&mut fd_b, &b_read_data).unwrap();
+
+			peers[0].process_events();
+			assert_eq!(fd_a.outbound_data.lock().unwrap().len(), 0, "Until A receives data, it shouldn't send more messages");
 		}
 
 		// Check that each peer has received the expected number of channel updates and channel
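The peer_handler changes above hold back both the initial gossip sync and forwarded announcements from a peer that advertises gossip_queries until that peer has sent a gossip_timestamp_filter. A simplified sketch of that gating decision, using a stand-in type rather than the LDK Peer struct:

    // Simplified sketch of the gating added above; `PeerView` is a stand-in
    // type, not the LDK `Peer` struct.
    struct PeerView {
        supports_gossip_queries: bool,
        sent_gossip_timestamp_filter: bool,
    }

    /// Mirrors the new checks: a peer that negotiated gossip_queries gets no gossip
    /// (neither the initial sync nor forwarded announcements) until it has sent a
    /// gossip_timestamp_filter.
    fn may_send_gossip(peer: &PeerView) -> bool {
        !peer.supports_gossip_queries || peer.sent_gossip_timestamp_filter
    }
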
diff --git a/lightning/src/routing/scoring.rs b/lightning/src/routing/scoring.rs
index 0e04c639..3d10a14c 100644
--- a/lightning/src/routing/scoring.rs
+++ b/lightning/src/routing/scoring.rs
@@ -623,6 +623,33 @@ impl<G: Deref<Target = NetworkGraph>, L: Deref, T: Time> ProbabilisticScorerUsin
 		assert!(self.channel_liquidities.insert(short_channel_id, liquidity).is_none());
 		self
 	}
+
+	/// Dump the contents of this scorer into the configured logger.
+	///
+	/// Note that this writes roughly one line per channel for which we have a liquidity estimate,
+	/// which may be a substantial amount of log output.
+	pub fn debug_log_liquidity_stats(&self) {
+		let graph = self.network_graph.read_only();
+		for (scid, liq) in self.channel_liquidities.iter() {
+			if let Some(chan_debug) = graph.channels().get(scid) {
+				let log_direction = |source, target| {
+					if let Some((directed_info, _)) = chan_debug.as_directed_to(target) {
+						let amt = directed_info.effective_capacity().as_msat();
+						let dir_liq = liq.as_directed(source, target, amt, self.params.liquidity_offset_half_life);
+						log_debug!(self.logger, "Liquidity from {:?} to {:?} via {} is in the range ({}, {})",
+							source, target, scid, dir_liq.min_liquidity_msat(), dir_liq.max_liquidity_msat());
+					} else {
+						log_debug!(self.logger, "No amount known for SCID {} from {:?} to {:?}", scid, source, target);
+					}
+				};
+
+				log_direction(&chan_debug.node_one, &chan_debug.node_two);
+				log_direction(&chan_debug.node_two, &chan_debug.node_one);
+			} else {
+				log_debug!(self.logger, "No network graph entry for SCID {}", scid);
+			}
+		}
+	}
 }
 
 impl ProbabilisticScoringParameters {
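A hypothetical call site for the new debug helper; `scorer` is assumed to be an already-constructed ProbabilisticScorer (its constructor arguments are not shown in this patch), and the output goes to whatever logger the scorer was built with:

    // Assumes `scorer` was built elsewhere with a network graph and logger.
    // Expect roughly one debug log line per channel with a liquidity estimate.
    scorer.debug_log_liquidity_stats();
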
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index 3c36cdf0..f6872430 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -49,6 +49,9 @@ use core::{cmp, mem};
 use bitcoin::bech32::u5;
 use chain::keysinterface::{InMemorySigner, Recipient, KeyMaterial};
 
+#[cfg(feature = "std")]
+use std::time::{SystemTime, UNIX_EPOCH};
+
 pub struct TestVecWriter(pub Vec<u8>);
 impl Writer for TestVecWriter {
 	fn write_all(&mut self, buf: &[u8]) -> Result<(), io::Error> {
@@ -341,6 +344,7 @@ fn get_dummy_channel_update(short_chan_id: u64) -> msgs::ChannelUpdate {
 pub struct TestRoutingMessageHandler {
 	pub chan_upds_recvd: AtomicUsize,
 	pub chan_anns_recvd: AtomicUsize,
+	pub pending_events: Mutex<Vec<events::MessageSendEvent>>,
 	pub request_full_sync: AtomicBool,
 }
 
@@ -349,6 +353,7 @@ impl TestRoutingMessageHandler {
 		TestRoutingMessageHandler {
 			chan_upds_recvd: AtomicUsize::new(0),
 			chan_anns_recvd: AtomicUsize::new(0),
+			pending_events: Mutex::new(vec![]),
 			request_full_sync: AtomicBool::new(false),
 		}
 	}
@@ -384,7 +389,35 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
 		Vec::new()
 	}
 
-	fn peer_connected(&self, _their_node_id: &PublicKey, _init_msg: &msgs::Init) {}
+	fn peer_connected(&self, their_node_id: &PublicKey, init_msg: &msgs::Init) {
+		if !init_msg.features.supports_gossip_queries() {
+			return ();
+		}
+
+		let should_request_full_sync = self.request_full_sync.load(Ordering::Acquire);
+
+		#[allow(unused_mut, unused_assignments)]
+		let mut gossip_start_time = 0;
+		#[cfg(feature = "std")]
+		{
+			gossip_start_time = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs();
+			if should_request_full_sync {
+				gossip_start_time -= 60 * 60 * 24 * 7 * 2; // 2 weeks ago
+			} else {
+				gossip_start_time -= 60 * 60; // an hour ago
+			}
+		}
+
+		let mut pending_events = self.pending_events.lock().unwrap();
+		pending_events.push(events::MessageSendEvent::SendGossipTimestampFilter {
+			node_id: their_node_id.clone(),
+			msg: msgs::GossipTimestampFilter {
+				chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+				first_timestamp: gossip_start_time as u32,
+				timestamp_range: u32::max_value(),
+			},
+		});
+	}
 
 	fn handle_reply_channel_range(&self, _their_node_id: &PublicKey, _msg: msgs::ReplyChannelRange) -> Result<(), msgs::LightningError> {
 		Ok(())
@@ -405,7 +438,10 @@ impl msgs::RoutingMessageHandler for TestRoutingMessageHandler {
 
 impl events::MessageSendEventsProvider for TestRoutingMessageHandler {
 	fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
-		vec![]
+		let mut ret = Vec::new();
+		let mut pending_events = self.pending_events.lock().unwrap();
+		core::mem::swap(&mut ret, &mut pending_events);
+		ret
 	}
 }