From: Jeffrey Czyz
Date: Thu, 27 Jun 2024 19:39:26 +0000 (-0500)
Subject: Merge pull request #3129 from optout21/splicing-msgs-update
X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=commitdiff_plain;h=3ccf06416091e107f443ee92027501105c48054b;hp=43a6ee34d7e78e161434f842da430190bbfba0e2;p=rust-lightning

Merge pull request #3129 from optout21/splicing-msgs-update

Update splice messages according to new spec draft
---

diff --git a/ci/check-cfg-flags.py b/ci/check-cfg-flags.py
index c33e8aa3..d73bf50a 100755
--- a/ci/check-cfg-flags.py
+++ b/ci/check-cfg-flags.py
@@ -104,6 +104,8 @@ def check_cfg_tag(cfg):
         pass
     elif cfg == "splicing":
         pass
+    elif cfg == "async_payments":
+        pass
     else:
         print("Bad cfg tag: " + cfg)
         assert False
diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh
index 0dc654d8..d0ba7c7f 100755
--- a/ci/ci-tests.sh
+++ b/ci/ci-tests.sh
@@ -179,3 +179,5 @@ RUSTFLAGS="--cfg=async_signing" cargo test --verbose --color always -p lightning
 RUSTFLAGS="--cfg=dual_funding" cargo test --verbose --color always -p lightning
 [ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
 RUSTFLAGS="--cfg=splicing" cargo test --verbose --color always -p lightning
+[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
+RUSTFLAGS="--cfg=async_payments" cargo test --verbose --color always -p lightning
diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index c4d179e7..423e69eb 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -396,12 +396,7 @@ impl SignerProvider for KeyProvider {
 		let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
 		let state = self.make_enforcement_state_cell(inner.commitment_seed);
-		Ok(TestChannelSigner {
-			inner,
-			state,
-			disable_revocation_policy_check: false,
-			available: Arc::new(Mutex::new(true)),
-		})
+		Ok(TestChannelSigner::new_with_revoked(inner, state, false))
 	}

 	fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result {
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index 2807b6dd..a2ae07a2 100644
--- a/fuzz/src/full_stack.rs
+++ b/fuzz/src/full_stack.rs
@@ -1366,8 +1366,8 @@ mod tests {
 		// process the now-pending HTLC forward
 		ext_from_hex("07", &mut test);
-		// Two feerate requests to check dust exposure
-		ext_from_hex("00fd00fd", &mut test);
+		// Three feerate requests to check dust exposure
+		ext_from_hex("00fd00fd00fd", &mut test);
 		// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: UpdateHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000)
 		// we respond with commitment_signed then revoke_and_ack (a weird, but valid, order)
@@ -1478,8 +1478,8 @@ mod tests {
 		// process the now-pending HTLC forward
 		ext_from_hex("07", &mut test);
-		// Two feerate requests to check dust exposure
-		ext_from_hex("00fd00fd", &mut test);
+		// Three feerate requests to check dust exposure
+		ext_from_hex("00fd00fd00fd", &mut test);
 		// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
 		// we respond with revoke_and_ack, then commitment_signed, then update_fail_htlc
@@ -1602,8 +1602,8 @@ mod tests {
 		// process the now-pending HTLC forward
 		ext_from_hex("07", &mut test);
-		// Two feerate requests to check dust exposure
-		ext_from_hex("00fd00fd", &mut test);
+		// Three feerate requests to check dust exposure
+		ext_from_hex("00fd00fd00fd", &mut test);
 		// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
 		// connect a block with one transaction of len 125
diff --git a/fuzz/src/onion_message.rs b/fuzz/src/onion_message.rs
index a8a290fe..e60d13d5 100644 --- a/fuzz/src/onion_message.rs +++ b/fuzz/src/onion_message.rs @@ -12,6 +12,9 @@ use lightning::ln::msgs::{self, DecodeError, OnionMessageHandler}; use lightning::ln::script::ShutdownScript; use lightning::offers::invoice::UnsignedBolt12Invoice; use lightning::offers::invoice_request::UnsignedInvoiceRequest; +use lightning::onion_message::async_payments::{ + AsyncPaymentsMessageHandler, HeldHtlcAvailable, ReleaseHeldHtlc, +}; use lightning::onion_message::messenger::{ CustomOnionMessageHandler, Destination, MessageRouter, OnionMessagePath, OnionMessenger, PendingOnionMessage, Responder, ResponseInstruction, @@ -39,6 +42,7 @@ pub fn do_test(data: &[u8], logger: &L) { let node_id_lookup = EmptyNodeIdLookUp {}; let message_router = TestMessageRouter {}; let offers_msg_handler = TestOffersMessageHandler {}; + let async_payments_msg_handler = TestAsyncPaymentsMessageHandler {}; let custom_msg_handler = TestCustomMessageHandler {}; let onion_messenger = OnionMessenger::new( &keys_manager, @@ -47,6 +51,7 @@ pub fn do_test(data: &[u8], logger: &L) { &node_id_lookup, &message_router, &offers_msg_handler, + &async_payments_msg_handler, &custom_msg_handler, ); @@ -105,6 +110,22 @@ impl OffersMessageHandler for TestOffersMessageHandler { } } +struct TestAsyncPaymentsMessageHandler {} + +impl AsyncPaymentsMessageHandler for TestAsyncPaymentsMessageHandler { + fn held_htlc_available( + &self, message: HeldHtlcAvailable, responder: Option, + ) -> ResponseInstruction { + let responder = match responder { + Some(resp) => resp, + None => return ResponseInstruction::NoResponse, + }; + responder + .respond(ReleaseHeldHtlc { payment_release_secret: message.payment_release_secret }) + } + fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {} +} + #[derive(Debug)] struct TestCustomMessage {} diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 4719d7e5..a355e2b0 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -568,7 +568,7 @@ use core::task; /// # type NetworkGraph = lightning::routing::gossip::NetworkGraph>; /// # type P2PGossipSync
    = lightning::routing::gossip::P2PGossipSync, Arc
      , Arc>; /// # type ChannelManager = lightning::ln::channelmanager::SimpleArcChannelManager, B, FE, Logger>; -/// # type OnionMessenger = lightning::onion_message::messenger::OnionMessenger, Arc, Arc, Arc>, Arc, Arc, Arc>>, Arc>, lightning::ln::peer_handler::IgnoringMessageHandler>; +/// # type OnionMessenger = lightning::onion_message::messenger::OnionMessenger, Arc, Arc, Arc>, Arc, Arc, Arc>>, Arc>, lightning::ln::peer_handler::IgnoringMessageHandler, lightning::ln::peer_handler::IgnoringMessageHandler>; /// # type Scorer = RwLock, Arc>>; /// # type PeerManager = lightning::ln::peer_handler::SimpleArcPeerManager, B, FE, Arc
        , Logger>; /// # @@ -996,7 +996,7 @@ mod tests { type PGS = Arc>>, Arc, Arc>>; type RGS = Arc>>, Arc>>; - type OM = OnionMessenger, Arc, Arc, Arc, Arc>>, Arc, Arc>>, IgnoringMessageHandler, IgnoringMessageHandler>; + type OM = OnionMessenger, Arc, Arc, Arc, Arc>>, Arc, Arc>>, IgnoringMessageHandler, Arc, IgnoringMessageHandler>; struct Node { node: Arc, @@ -1291,7 +1291,7 @@ mod tests { let best_block = BestBlock::from_network(network); let params = ChainParameters { network, best_block }; let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time)); - let messenger = Arc::new(OnionMessenger::new(keys_manager.clone(), keys_manager.clone(), logger.clone(), manager.clone(), msg_router.clone(), IgnoringMessageHandler {}, IgnoringMessageHandler {})); + let messenger = Arc::new(OnionMessenger::new(keys_manager.clone(), keys_manager.clone(), logger.clone(), manager.clone(), msg_router.clone(), IgnoringMessageHandler {}, manager.clone(), IgnoringMessageHandler {})); let wallet = Arc::new(TestWallet {}); let sweeper = Arc::new(OutputSweeper::new(best_block, Arc::clone(&tx_broadcaster), Arc::clone(&fee_estimator), None::>, Arc::clone(&keys_manager), wallet, Arc::clone(&kv_store), Arc::clone(&logger))); diff --git a/lightning-rapid-gossip-sync/src/lib.rs b/lightning-rapid-gossip-sync/src/lib.rs index ecc52553..c6951301 100644 --- a/lightning-rapid-gossip-sync/src/lib.rs +++ b/lightning-rapid-gossip-sync/src/lib.rs @@ -1,6 +1,5 @@ #![deny(rustdoc::broken_intra_doc_links)] #![deny(rustdoc::private_intra_doc_links)] - #![deny(missing_docs)] #![deny(unsafe_code)] #![deny(non_upper_case_globals)] @@ -63,15 +62,16 @@ #![cfg_attr(all(not(feature = "std"), not(test)), no_std)] -#[cfg(ldk_bench)] extern crate criterion; +#[cfg(ldk_bench)] +extern crate criterion; #[cfg(not(feature = "std"))] extern crate alloc; -#[cfg(feature = "std")] -use std::fs::File; use core::ops::Deref; use core::sync::atomic::{AtomicBool, Ordering}; +#[cfg(feature = "std")] +use std::fs::File; use lightning::io; use lightning::ln::msgs::{DecodeError, LightningError}; @@ -121,21 +121,22 @@ impl From for GraphSyncError { /// See [crate-level documentation] for usage. /// /// [crate-level documentation]: crate -pub struct RapidGossipSync>, L: Deref> -where L::Target: Logger { +pub struct RapidGossipSync>, L: Deref> +where + L::Target: Logger, +{ network_graph: NG, logger: L, - is_initial_sync_complete: AtomicBool + is_initial_sync_complete: AtomicBool, } -impl>, L: Deref> RapidGossipSync where L::Target: Logger { +impl>, L: Deref> RapidGossipSync +where + L::Target: Logger, +{ /// Instantiate a new [`RapidGossipSync`] instance. pub fn new(network_graph: NG, logger: L) -> Self { - Self { - network_graph, - logger, - is_initial_sync_complete: AtomicBool::new(false) - } + Self { network_graph, logger, is_initial_sync_complete: AtomicBool::new(false) } } /// Sync gossip data from a file. 
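A minimal usage sketch of the constructor above and the sync entry points in the following hunk, modelled on this crate's own tests further down (the snapshot path and `TestLogger` come from those tests; `TestLogger` requires LDK's test utilities, and error handling is elided):

use bitcoin::Network;
use lightning::routing::gossip::NetworkGraph;
use lightning::util::test_utils::TestLogger;
use lightning_rapid_gossip_sync::RapidGossipSync;

fn sync_from_snapshot_sketch() {
	let logger = TestLogger::new();
	let network_graph = NetworkGraph::new(Network::Bitcoin, &logger);
	let rapid_sync = RapidGossipSync::new(&network_graph, &logger);

	// std builds can apply a snapshot file directly.
	rapid_sync
		.sync_network_graph_with_file_path("./res/full_graph.lngossip")
		.expect("snapshot should decode");

	// no-std builds instead pass raw bytes plus an optional current unix time, which
	// feeds the two-week staleness check:
	// rapid_sync.update_network_graph_no_std(&snapshot_bytes[..], Some(current_time_unix))
}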
@@ -147,8 +148,7 @@ impl>, L: Deref> RapidGossipSync where L /// #[cfg(feature = "std")] pub fn sync_network_graph_with_file_path( - &self, - sync_path: &str, + &self, sync_path: &str, ) -> Result { let mut file = File::open(sync_path)?; self.update_network_graph_from_byte_stream(&mut file) @@ -169,7 +169,9 @@ impl>, L: Deref> RapidGossipSync where L /// /// `update_data`: `&[u8]` binary stream that comprises the update data /// `current_time_unix`: `Option` optional current timestamp to verify data age - pub fn update_network_graph_no_std(&self, update_data: &[u8], current_time_unix: Option) -> Result { + pub fn update_network_graph_no_std( + &self, update_data: &[u8], current_time_unix: Option, + ) -> Result { let mut read_cursor = io::Cursor::new(update_data); self.update_network_graph_from_byte_stream_no_std(&mut read_cursor, current_time_unix) } @@ -195,10 +197,10 @@ mod tests { use bitcoin::Network; + use crate::{GraphSyncError, RapidGossipSync}; use lightning::ln::msgs::DecodeError; use lightning::routing::gossip::NetworkGraph; use lightning::util::test_utils::TestLogger; - use crate::{GraphSyncError, RapidGossipSync}; #[test] fn test_sync_from_file() { @@ -294,8 +296,7 @@ mod tests { let rapid_sync = RapidGossipSync::new(&network_graph, &logger); let start = std::time::Instant::now(); - let sync_result = rapid_sync - .sync_network_graph_with_file_path("./res/full_graph.lngossip"); + let sync_result = rapid_sync.sync_network_graph_with_file_path("./res/full_graph.lngossip"); if let Err(GraphSyncError::DecodeError(DecodeError::Io(io_error))) = &sync_result { let error_string = format!("Input file lightning-rapid-gossip-sync/res/full_graph.lngossip is missing! Download it from https://bitcoin.ninja/ldk-compressed_graph-285cb27df79-2022-07-21.bin\n\n{:?}", io_error); #[cfg(not(require_route_graph_test))] diff --git a/lightning-rapid-gossip-sync/src/processing.rs b/lightning-rapid-gossip-sync/src/processing.rs index b996cfac..c7b8a832 100644 --- a/lightning-rapid-gossip-sync/src/processing.rs +++ b/lightning-rapid-gossip-sync/src/processing.rs @@ -5,12 +5,15 @@ use core::sync::atomic::Ordering; use bitcoin::blockdata::constants::ChainHash; use bitcoin::secp256k1::PublicKey; -use lightning::ln::msgs::{DecodeError, ErrorAction, LightningError, SocketAddress, UnsignedChannelUpdate, UnsignedNodeAnnouncement}; +use lightning::io; +use lightning::ln::msgs::{ + DecodeError, ErrorAction, LightningError, SocketAddress, UnsignedChannelUpdate, + UnsignedNodeAnnouncement, +}; use lightning::routing::gossip::{NetworkGraph, NodeAlias, NodeId}; use lightning::util::logger::Logger; -use lightning::{log_debug, log_warn, log_trace, log_given_level, log_gossip}; use lightning::util::ser::{BigSize, FixedLengthReader, Readable}; -use lightning::io; +use lightning::{log_debug, log_given_level, log_gossip, log_trace, log_warn}; use crate::{GraphSyncError, RapidGossipSync}; @@ -18,7 +21,7 @@ use crate::{GraphSyncError, RapidGossipSync}; use std::time::{SystemTime, UNIX_EPOCH}; #[cfg(all(not(feature = "std"), not(test)))] -use alloc::{vec::Vec, borrow::ToOwned}; +use alloc::{borrow::ToOwned, vec::Vec}; use lightning::ln::features::NodeFeatures; /// The purpose of this prefix is to identify the serialization format, should other rapid gossip @@ -35,11 +38,13 @@ const MAX_INITIAL_NODE_ID_VECTOR_CAPACITY: u32 = 50_000; /// suggestion. 
const STALE_RGS_UPDATE_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 14; -impl>, L: Deref> RapidGossipSync where L::Target: Logger { +impl>, L: Deref> RapidGossipSync +where + L::Target: Logger, +{ #[cfg(feature = "std")] pub(crate) fn update_network_graph_from_byte_stream( - &self, - read_cursor: &mut R, + &self, read_cursor: &mut R, ) -> Result { #[allow(unused_mut, unused_assignments)] let mut current_time_unix = None; @@ -47,15 +52,18 @@ impl>, L: Deref> RapidGossipSync where L { // Note that many tests rely on being able to set arbitrarily old timestamps, thus we // disable this check during tests! - current_time_unix = Some(SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs()); + current_time_unix = Some( + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("Time must be > 1970") + .as_secs(), + ); } self.update_network_graph_from_byte_stream_no_std(read_cursor, current_time_unix) } pub(crate) fn update_network_graph_from_byte_stream_no_std( - &self, - mut read_cursor: &mut R, - current_time_unix: Option + &self, mut read_cursor: &mut R, current_time_unix: Option, ) -> Result { log_trace!(self.logger, "Processing RGS data..."); let mut protocol_prefix = [0u8; 3]; @@ -75,19 +83,24 @@ impl>, L: Deref> RapidGossipSync where L let chain_hash: ChainHash = Readable::read(read_cursor)?; let ng_chain_hash = self.network_graph.get_chain_hash(); if chain_hash != ng_chain_hash { - return Err( - LightningError { - err: "Rapid Gossip Sync data's chain hash does not match the network graph's".to_owned(), - action: ErrorAction::IgnoreError, - }.into() - ); + return Err(LightningError { + err: "Rapid Gossip Sync data's chain hash does not match the network graph's" + .to_owned(), + action: ErrorAction::IgnoreError, + } + .into()); } let latest_seen_timestamp: u32 = Readable::read(read_cursor)?; if let Some(time) = current_time_unix { - if (latest_seen_timestamp as u64) < time.saturating_sub(STALE_RGS_UPDATE_AGE_LIMIT_SECS) { - return Err(LightningError{err: "Rapid Gossip Sync data is more than two weeks old".to_owned(), action: ErrorAction::IgnoreError}.into()); + if (latest_seen_timestamp as u64) < time.saturating_sub(STALE_RGS_UPDATE_AGE_LIMIT_SECS) + { + return Err(LightningError { + err: "Rapid Gossip Sync data is more than two weeks old".to_owned(), + action: ErrorAction::IgnoreError, + } + .into()); } } @@ -157,7 +170,8 @@ impl>, L: Deref> RapidGossipSync where L excess_data: Vec::new(), }; - read_only_network_graph.nodes() + read_only_network_graph + .nodes() .get(¤t_node_id) .and_then(|node| node.announcement_info.as_ref()) .map(|info| { @@ -172,7 +186,8 @@ impl>, L: Deref> RapidGossipSync where L let mut node_addresses: Vec = Vec::new(); for address_index in 0..address_count { let current_byte_count: u8 = Readable::read(read_cursor)?; - let mut address_reader = FixedLengthReader::new(&mut read_cursor, current_byte_count as u64); + let mut address_reader = + FixedLengthReader::new(&mut read_cursor, current_byte_count as u64); if let Ok(current_address) = Readable::read(&mut address_reader) { node_addresses.push(current_address); if address_reader.bytes_remain() { @@ -183,7 +198,8 @@ impl>, L: Deref> RapidGossipSync where L log_gossip!( self.logger, "Failure to parse address at index {} for node ID {}", - address_index, current_node_id + address_index, + current_node_id ); address_reader.eat_remaining()?; } @@ -209,7 +225,11 @@ impl>, L: Deref> RapidGossipSync where L if has_additional_data { let additional_data: Vec = Readable::read(read_cursor)?; - 
log_gossip!(self.logger, "Ignoring {} bytes of additional data in node announcement", additional_data.len()); + log_gossip!( + self.logger, + "Ignoring {} bytes of additional data in node announcement", + additional_data.len() + ); } } } else { @@ -226,15 +246,15 @@ impl>, L: Deref> RapidGossipSync where L // handle SCID let scid_delta: BigSize = Readable::read(read_cursor)?; - let short_channel_id = previous_scid - .checked_add(scid_delta.0) - .ok_or(DecodeError::InvalidValue)?; + let short_channel_id = + previous_scid.checked_add(scid_delta.0).ok_or(DecodeError::InvalidValue)?; previous_scid = short_channel_id; let node_id_1_index: BigSize = Readable::read(read_cursor)?; let mut node_id_2_index: BigSize = Readable::read(read_cursor)?; let has_additional_data = (node_id_2_index.0 & (1 << 63)) > 0; - node_id_2_index.0 &= !(1 << 63); // ensure 63rd bit isn't set + // ensure 63rd bit isn't set + node_id_2_index.0 &= !(1 << 63); if max(node_id_1_index.0, node_id_2_index.0) >= node_id_count as u64 { return Err(DecodeError::InvalidValue.into()); @@ -242,8 +262,12 @@ impl>, L: Deref> RapidGossipSync where L let node_id_1 = node_ids[node_id_1_index.0 as usize]; let node_id_2 = node_ids[node_id_2_index.0 as usize]; - log_gossip!(self.logger, "Adding channel {} from RGS announcement at {}", - short_channel_id, latest_seen_timestamp); + log_gossip!( + self.logger, + "Adding channel {} from RGS announcement at {}", + short_channel_id, + latest_seen_timestamp + ); let announcement_result = network_graph.add_channel_from_partial_announcement( short_channel_id, @@ -256,7 +280,11 @@ impl>, L: Deref> RapidGossipSync where L if let ErrorAction::IgnoreDuplicateGossip = lightning_error.action { // everything is fine, just a duplicate channel announcement } else { - log_warn!(self.logger, "Failed to process channel announcement: {:?}", lightning_error); + log_warn!( + self.logger, + "Failed to process channel announcement: {:?}", + lightning_error + ); return Err(lightning_error.into()); } } @@ -264,25 +292,35 @@ impl>, L: Deref> RapidGossipSync where L if version >= 2 && has_additional_data { // forwards compatibility let additional_data: Vec = Readable::read(read_cursor)?; - log_gossip!(self.logger, "Ignoring {} bytes of additional data in channel announcement", additional_data.len()); + log_gossip!( + self.logger, + "Ignoring {} bytes of additional data in channel announcement", + additional_data.len() + ); } } for modification in node_modifications { match network_graph.update_node_from_unsigned_announcement(&modification) { - Ok(_) => {} - Err(LightningError { action: ErrorAction::IgnoreDuplicateGossip, .. }) => {} + Ok(_) => {}, + Err(LightningError { action: ErrorAction::IgnoreDuplicateGossip, .. 
}) => {}, Err(LightningError { action: ErrorAction::IgnoreAndLog(level), err }) => { - log_given_level!(self.logger, level, "Failed to apply node announcement: {:?}", err); - } + log_given_level!( + self.logger, + level, + "Failed to apply node announcement: {:?}", + err + ); + }, Err(LightningError { action: ErrorAction::IgnoreError, err }) => { log_gossip!(self.logger, "Failed to apply node announcement: {:?}", err); - } + }, Err(e) => return Err(e.into()), } } - previous_scid = 0; // updates start at a new scid + // updates start at a new scid + previous_scid = 0; let update_count: u32 = Readable::read(read_cursor)?; log_debug!(self.logger, "Processing RGS update from {} with {} nodes, {} channel announcements and {} channel updates.", @@ -302,9 +340,8 @@ impl>, L: Deref> RapidGossipSync where L for _ in 0..update_count { let scid_delta: BigSize = Readable::read(read_cursor)?; - let short_channel_id = previous_scid - .checked_add(scid_delta.0) - .ok_or(DecodeError::InvalidValue)?; + let short_channel_id = + previous_scid.checked_add(scid_delta.0).ok_or(DecodeError::InvalidValue)?; previous_scid = short_channel_id; let channel_flags: u8 = Readable::read(read_cursor)?; @@ -317,7 +354,11 @@ impl>, L: Deref> RapidGossipSync where L if scid_delta.0 == 0 && is_same_direction_update { // this is additional data for forwards compatibility let additional_data: Vec = Readable::read(read_cursor)?; - log_gossip!(self.logger, "Ignoring {} bytes of additional data in channel update", additional_data.len()); + log_gossip!( + self.logger, + "Ignoring {} bytes of additional data in channel update", + additional_data.len() + ); continue; } } @@ -343,15 +384,17 @@ impl>, L: Deref> RapidGossipSync where L if (channel_flags & 0b_1000_0000) != 0 { // incremental update, field flags will indicate mutated values let read_only_network_graph = network_graph.read_only(); - if let Some(directional_info) = - read_only_network_graph.channels().get(&short_channel_id) + if let Some(directional_info) = read_only_network_graph + .channels() + .get(&short_channel_id) .and_then(|channel| channel.get_directional_info(channel_flags)) { synthetic_update.cltv_expiry_delta = directional_info.cltv_expiry_delta; synthetic_update.htlc_minimum_msat = directional_info.htlc_minimum_msat; synthetic_update.htlc_maximum_msat = directional_info.htlc_maximum_msat; synthetic_update.fee_base_msat = directional_info.fees.base_msat; - synthetic_update.fee_proportional_millionths = directional_info.fees.proportional_millionths; + synthetic_update.fee_proportional_millionths = + directional_info.fees.proportional_millionths; } else { log_trace!(self.logger, "Skipping application of channel update for chan {} with flags {} as original data is missing.", @@ -389,13 +432,23 @@ impl>, L: Deref> RapidGossipSync where L continue; } - log_gossip!(self.logger, "Updating channel {} with flags {} from RGS announcement at {}", - short_channel_id, channel_flags, latest_seen_timestamp); + log_gossip!( + self.logger, + "Updating channel {} with flags {} from RGS announcement at {}", + short_channel_id, + channel_flags, + latest_seen_timestamp + ); match network_graph.update_channel_unsigned(&synthetic_update) { Ok(_) => {}, Err(LightningError { action: ErrorAction::IgnoreDuplicateGossip, .. 
}) => {}, Err(LightningError { action: ErrorAction::IgnoreAndLog(level), err }) => { - log_given_level!(self.logger, level, "Failed to apply channel update: {:?}", err); + log_given_level!( + self.logger, + level, + "Failed to apply channel update: {:?}", + err + ); }, Err(LightningError { action: ErrorAction::IgnoreError, .. }) => {}, Err(e) => return Err(e.into()), @@ -429,21 +482,20 @@ mod tests { use crate::{GraphSyncError, RapidGossipSync}; const VALID_RGS_BINARY: [u8; 300] = [ - 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247, - 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218, - 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251, - 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125, - 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136, - 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106, - 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138, - 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175, - 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128, - 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68, - 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 4, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232, - 0, 0, 0, 1, 0, 0, 0, 0, 29, 129, 25, 192, 255, 8, 153, 192, 0, 2, 27, 0, 0, 60, 0, 0, - 0, 0, 0, 0, 0, 1, 0, 0, 0, 100, 0, 0, 2, 224, 0, 0, 0, 0, 58, 85, 116, 216, 0, 29, 0, - 0, 0, 1, 0, 0, 0, 125, 0, 0, 0, 0, 58, 85, 116, 216, 255, 2, 68, 226, 0, 6, 11, 0, 1, - 0, 0, 1, + 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247, 79, + 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218, 0, 0, 0, + 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251, 187, 172, 38, + 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125, 157, 176, 223, + 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136, 88, 216, 115, 11, + 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106, 204, 131, 186, 35, + 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138, 181, 64, 187, 103, 127, + 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175, 110, 32, 237, 0, 217, 90, + 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128, 76, 97, 0, 0, 0, 2, 0, 0, 255, + 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68, 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 4, + 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232, 0, 0, 0, 1, 0, 0, 0, 0, 29, 129, 25, 192, + 255, 8, 153, 192, 0, 2, 27, 0, 0, 60, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 100, 0, 0, 2, 224, + 0, 0, 0, 0, 58, 85, 116, 216, 0, 29, 0, 0, 0, 1, 0, 0, 0, 125, 0, 0, 0, 0, 58, 85, 116, + 216, 255, 2, 68, 226, 0, 6, 11, 0, 1, 0, 0, 1, ]; const VALID_BINARY_TIMESTAMP: u64 = 1642291930; @@ -489,15 +541,15 @@ mod tests { 76, 68, 75, 2, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247, 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 102, 97, 206, 240, 0, 0, 0, 0, 2, 63, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, - 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, 5, 38, 4, 1, + 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, 5, 38, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 0, 2, 3, 0, 4, 7, 1, 127, 0, 0, 1, 37, 163, 14, 5, 10, 103, 111, 111, 103, 108, - 101, 46, 99, 111, 109, 1, 187, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, - 57, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 2, 23, 48, 62, 77, 75, 108, 209, - 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201, 66, - 51, 116, 196, 81, 167, 37, 77, 7, 102, 0, 0, 2, 25, 48, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, - 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, - 0, 0, 1 + 1, 1, 1, 0, 2, 3, 0, 4, 7, 1, 127, 0, 0, 1, 37, 163, 14, 5, 10, 103, 111, 111, 103, + 108, 101, 46, 99, 111, 109, 1, 187, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 5, 57, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 2, 23, 48, 62, 77, 75, 108, + 209, 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201, + 66, 51, 116, 196, 81, 167, 37, 77, 7, 102, 0, 0, 2, 25, 48, 0, 0, 0, 1, 0, 0, 1, 0, 1, + 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 0, 0, 1, ]; let rapid_sync = RapidGossipSync::new(&network_graph, &logger); @@ -510,7 +562,11 @@ mod tests { { // address node - let node_id = NodeId::from_slice(&[3, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143]).unwrap(); + let node_id = NodeId::from_slice(&[ + 3, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30, + 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, + ]) + .unwrap(); let node = nodes.get(&node_id).unwrap(); let announcement_info = node.announcement_info.as_ref().unwrap(); let addresses = announcement_info.addresses(); @@ -519,7 +575,11 @@ mod tests { { // feature node - let node_id = NodeId::from_slice(&[2, 77, 75, 108, 209, 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201, 66, 51, 116, 196, 81, 167, 37, 77, 7, 102]).unwrap(); + let node_id = NodeId::from_slice(&[ + 2, 77, 75, 108, 209, 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, + 234, 216, 10, 201, 66, 51, 116, 196, 81, 167, 37, 77, 7, 102, + ]) + .unwrap(); let node = nodes.get(&node_id).unwrap(); let announcement_info = node.announcement_info.as_ref().unwrap(); let features = announcement_info.features(); @@ -527,7 +587,11 @@ mod tests { // assert_eq!(addresses.len(), 5); } - logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Failed to apply node announcement", 0); + logger.assert_log_contains( + "lightning_rapid_gossip_sync::processing", + "Failed to apply node announcement", + 0, + ); } #[test] @@ -538,13 +602,25 @@ mod tests { let rapid_sync = RapidGossipSync::new(&network_graph, &logger); let example_input = vec![ - 76, 68, 75, 2, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247, 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 102, 105, 183, 240, 0, 0, 0, 0, 1, 63, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, 5, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 7, 1, 127, 0, 0, 1, 37, 163, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 57, 38, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 3, 0, 4, 14, 5, 10, 103, 111, 111, 103, 108, 101, 46, 99, 111, 109, 1, 187, 0, 2, 23, 48, 0, 0, 0, 0, 0, 0, 0, 0, + 76, 68, 
75, 2, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247, + 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 102, 105, 183, + 240, 0, 0, 0, 0, 1, 63, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, + 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, 5, 13, + 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 7, 1, 127, 0, 0, 1, 37, 163, 19, 2, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 57, 38, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 3, 0, 4, 14, 5, + 10, 103, 111, 111, 103, 108, 101, 46, 99, 111, 109, 1, 187, 0, 2, 23, 48, 0, 0, 0, 0, + 0, 0, 0, 0, ]; let update_result = rapid_sync.update_network_graph_no_std(&example_input[..], None); assert!(update_result.is_ok()); - logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Failed to apply node announcement: \"No existing channels for node_announcement\"", 1); + logger.assert_log_contains( + "lightning_rapid_gossip_sync::processing", + "Failed to apply node announcement: \"No existing channels for node_announcement\"", + 1, + ); } #[test] @@ -558,11 +634,12 @@ mod tests { 76, 68, 75, 2, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247, 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 102, 106, 12, 80, 1, 0, 2, 23, 48, 0, 0, 0, 3, 143, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, - 170, 186, 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, - 5, 38, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 0, 2, 3, 0, 4, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 5, 57, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 7, 1, 127, 0, 0, 1, 37, 163, 14, 5, - 10, 103, 111, 111, 103, 108, 101, 46, 99, 111, 109, 1, 187, 0, 255, 0, 0, 0, 0, 0, 0, 0, + 170, 186, 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, + 143, 5, 38, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 3, 0, 4, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 5, 57, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 7, 1, 127, 0, 0, 1, 37, 163, + 14, 5, 10, 103, 111, 111, 103, 108, 101, 46, 99, 111, 109, 1, 187, 0, 255, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -570,10 +647,10 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 138, 77, 75, 108, 209, 54, 16, + 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201, 66, 51, 116, + 196, 81, 167, 37, 77, 7, 102, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 138, 77, 75, 108, 209, 54, 16, 50, 202, - 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201, 66, 51, 116, 196, 81, - 167, 37, 
77, 7, 102, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -581,10 +658,10 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 186, 83, 31, 230, 6, 129, 52, 80, 61, 39, 35, 19, 50, 39, 200, + 103, 172, 143, 166, 200, 60, 83, 126, 154, 68, 195, 197, 189, 189, 203, 31, 227, 55, 0, + 2, 22, 49, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 186, 83, 31, 230, 6, 129, 52, 80, 61, 39, 35, 19, 50, 39, 200, 103, 172, 143, - 166, 200, 60, 83, 126, 154, 68, 195, 197, 189, 189, 203, 31, 227, 55, 0, 2, 22, 49, 0, - 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -592,26 +669,37 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, - 0, 0, 1, 0, 255, 128, 0, 0, 0, 0, 0, 0, 1, 0, 147, 23, 23, 23, 23, 23, 23, 23, 23, 23, + 0, 0, 0, 1, 0, 0, 1, 0, 255, 128, 0, 0, 0, 0, 0, 0, 1, 0, 147, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, - 23, 23, 23, 23, 23, 23, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 17, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 0, 1, 0, 1, 0, 17, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42 + 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 17, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 0, 1, 0, 1, 0, 17, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, ]; let update_result = rapid_sync.update_network_graph_no_std(&example_input[..], None); assert!(update_result.is_ok()); - logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Ignoring 255 bytes of additional data in node announcement", 3); - logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Ignoring 147 
bytes of additional data in channel announcement", 1); - logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Ignoring 17 bytes of additional data in channel update", 1); + logger.assert_log_contains( + "lightning_rapid_gossip_sync::processing", + "Ignoring 255 bytes of additional data in node announcement", + 3, + ); + logger.assert_log_contains( + "lightning_rapid_gossip_sync::processing", + "Ignoring 147 bytes of additional data in channel announcement", + 1, + ); + logger.assert_log_contains( + "lightning_rapid_gossip_sync::processing", + "Ignoring 17 bytes of additional data in channel update", + 1, + ); } #[test] @@ -690,10 +778,7 @@ mod tests { let rapid_sync = RapidGossipSync::new(&network_graph, &logger); let initialization_result = rapid_sync.update_network_graph(&initialization_input[..]); if initialization_result.is_err() { - panic!( - "Unexpected initialization result: {:?}", - initialization_result - ) + panic!("Unexpected initialization result: {:?}", initialization_result) } assert_eq!(network_graph.read_only().channels().len(), 2); @@ -756,7 +841,8 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 136, 0, 0, 0, 221, 255, 2, 68, 226, 0, 6, 11, 0, 1, 128, ]; - let update_result = rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]); + let update_result = + rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]); if update_result.is_err() { panic!("Unexpected update result: {:?}", update_result) } @@ -816,9 +902,11 @@ mod tests { 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 136, 0, 0, 0, 221, 255, 2, 68, 226, 0, 6, 11, 0, 1, 128, ]; - let update_result_1 = rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]); + let update_result_1 = + rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]); // Apply duplicate update - let update_result_2 = rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]); + let update_result_2 = + rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]); assert!(update_result_1.is_ok()); assert!(update_result_2.is_ok()); } @@ -881,7 +969,8 @@ mod tests { assert_eq!(network_graph.read_only().channels().len(), 0); let rapid_sync = RapidGossipSync::new(&network_graph, &logger); - let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_nonpruning_time)); + let update_result = rapid_sync + .update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_nonpruning_time)); assert!(update_result.is_ok()); assert_eq!(network_graph.read_only().channels().len(), 2); } @@ -891,7 +980,8 @@ mod tests { assert_eq!(network_graph.read_only().channels().len(), 0); let rapid_sync = RapidGossipSync::new(&network_graph, &logger); - let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_nonpruning_time + 1)); + let update_result = rapid_sync + .update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_nonpruning_time + 1)); assert!(update_result.is_ok()); assert_eq!(network_graph.read_only().channels().len(), 0); } @@ -910,7 +1000,8 @@ mod tests { assert_eq!(network_graph.read_only().channels().len(), 0); let rapid_sync = RapidGossipSync::new(&network_graph, &logger); - let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_succeeding_time)); + let update_result = rapid_sync + .update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_succeeding_time)); 
assert!(update_result.is_ok()); assert_eq!(network_graph.read_only().channels().len(), 0); } @@ -920,7 +1011,8 @@ mod tests { assert_eq!(network_graph.read_only().channels().len(), 0); let rapid_sync = RapidGossipSync::new(&network_graph, &logger); - let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(earliest_failing_time)); + let update_result = rapid_sync + .update_network_graph_no_std(&VALID_RGS_BINARY, Some(earliest_failing_time)); assert!(update_result.is_err()); if let Err(GraphSyncError::LightningError(lightning_error)) = update_result { assert_eq!( @@ -980,7 +1072,10 @@ mod tests { let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(0)); assert!(update_result.is_err()); if let Err(GraphSyncError::LightningError(err)) = update_result { - assert_eq!(err.err, "Rapid Gossip Sync data's chain hash does not match the network graph's"); + assert_eq!( + err.err, + "Rapid Gossip Sync data's chain hash does not match the network graph's" + ); } else { panic!("Unexpected update result: {:?}", update_result) } diff --git a/lightning/src/blinded_path/message.rs b/lightning/src/blinded_path/message.rs index 1f3f5a1f..bdbb4be4 100644 --- a/lightning/src/blinded_path/message.rs +++ b/lightning/src/blinded_path/message.rs @@ -30,7 +30,7 @@ use core::mem; use core::ops::Deref; /// An intermediate node, and possibly a short channel id leading to the next node. -#[derive(Clone, Debug, Hash, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] pub struct ForwardNode { /// This node's pubkey. pub node_id: PublicKey, @@ -106,6 +106,8 @@ pub(super) fn blinded_hops( // Advance the blinded onion message path by one hop, so make the second hop into the new // introduction node. +// +// Will only modify `path` when returning `Ok`. 
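// As implemented below, the hop is advanced by ECDH-ing our node key with the path's
// current blinding point, decrypting the first hop's control TLVs to learn the next
// node, deriving the next blinding point, and only then popping the consumed hop; a
// failed lookup or decrypt therefore leaves `path` untouched.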
pub(crate) fn advance_path_by_one( path: &mut BlindedPath, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1 ) -> Result<(), ()> @@ -116,8 +118,8 @@ where { let control_tlvs_ss = node_signer.ecdh(Recipient::Node, &path.blinding_point, None)?; let rho = onion_utils::gen_rho_from_shared_secret(&control_tlvs_ss.secret_bytes()); - let encrypted_control_tlvs = path.blinded_hops.remove(0).encrypted_payload; - let mut s = Cursor::new(&encrypted_control_tlvs); + let encrypted_control_tlvs = &path.blinded_hops.get(0).ok_or(())?.encrypted_payload; + let mut s = Cursor::new(encrypted_control_tlvs); let mut reader = FixedLengthReader::new(&mut s, encrypted_control_tlvs.len() as u64); match ChaChaPolyReadAdapter::read(&mut reader, rho) { Ok(ChaChaPolyReadAdapter { @@ -139,6 +141,7 @@ where }; mem::swap(&mut path.blinding_point, &mut new_blinding_point); path.introduction_node = IntroductionNode::NodeId(next_node_id); + path.blinded_hops.remove(0); Ok(()) }, _ => Err(()) diff --git a/lightning/src/blinded_path/payment.rs b/lightning/src/blinded_path/payment.rs index 3b71f77c..7df7d1c6 100644 --- a/lightning/src/blinded_path/payment.rs +++ b/lightning/src/blinded_path/payment.rs @@ -13,17 +13,24 @@ use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey}; -use crate::blinded_path::BlindedHop; +use crate::blinded_path::{BlindedHop, BlindedPath, IntroductionNode, NodeIdLookUp}; use crate::blinded_path::utils; +use crate::crypto::streams::ChaChaPolyReadAdapter; use crate::io; +use crate::io::Cursor; use crate::ln::types::PaymentSecret; use crate::ln::channel_state::CounterpartyForwardingInfo; use crate::ln::features::BlindedHopFeatures; use crate::ln::msgs::DecodeError; +use crate::ln::onion_utils; use crate::offers::invoice::BlindedPayInfo; use crate::offers::invoice_request::InvoiceRequestFields; use crate::offers::offer::OfferId; -use crate::util::ser::{HighZeroBytesDroppedBigSize, Readable, Writeable, Writer}; +use crate::sign::{NodeSigner, Recipient}; +use crate::util::ser::{FixedLengthReader, LengthReadableArgs, HighZeroBytesDroppedBigSize, Readable, Writeable, Writer}; + +use core::mem; +use core::ops::Deref; #[allow(unused_imports)] use crate::prelude::*; @@ -274,6 +281,43 @@ pub(super) fn blinded_hops( utils::construct_blinded_hops(secp_ctx, pks, tlvs, session_priv) } +// Advance the blinded onion payment path by one hop, so make the second hop into the new +// introduction node. +// +// Will only modify `path` when returning `Ok`. +pub(crate) fn advance_path_by_one( + path: &mut BlindedPath, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1 +) -> Result<(), ()> +where + NS::Target: NodeSigner, + NL::Target: NodeIdLookUp, + T: secp256k1::Signing + secp256k1::Verification, +{ + let control_tlvs_ss = node_signer.ecdh(Recipient::Node, &path.blinding_point, None)?; + let rho = onion_utils::gen_rho_from_shared_secret(&control_tlvs_ss.secret_bytes()); + let encrypted_control_tlvs = &path.blinded_hops.get(0).ok_or(())?.encrypted_payload; + let mut s = Cursor::new(encrypted_control_tlvs); + let mut reader = FixedLengthReader::new(&mut s, encrypted_control_tlvs.len() as u64); + match ChaChaPolyReadAdapter::read(&mut reader, rho) { + Ok(ChaChaPolyReadAdapter { + readable: BlindedPaymentTlvs::Forward(ForwardTlvs { short_channel_id, .. 
}) + }) => { + let next_node_id = match node_id_lookup.next_node_id(short_channel_id) { + Some(node_id) => node_id, + None => return Err(()), + }; + let mut new_blinding_point = onion_utils::next_hop_pubkey( + secp_ctx, path.blinding_point, control_tlvs_ss.as_ref() + ).map_err(|_| ())?; + mem::swap(&mut path.blinding_point, &mut new_blinding_point); + path.introduction_node = IntroductionNode::NodeId(next_node_id); + path.blinded_hops.remove(0); + Ok(()) + }, + _ => Err(()) + } +} + /// `None` if underflow occurs. pub(crate) fn amt_to_forward_msat(inbound_amt_msat: u64, payment_relay: &PaymentRelay) -> Option { let inbound_amt = inbound_amt_msat as u128; diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 58bf5f80..e6bb9d90 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -44,6 +44,7 @@ use crate::prelude::*; use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard}; use core::ops::Deref; use core::sync::atomic::{AtomicUsize, Ordering}; +use bitcoin::hashes::Hash; use bitcoin::secp256k1::PublicKey; /// `Persist` defines behavior for persisting channel monitors: this could mean @@ -260,10 +261,11 @@ where C::Target: chain::Filter, { let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down."; let funding_outpoints = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned()); + let channel_count = funding_outpoints.len(); for funding_outpoint in funding_outpoints.iter() { let monitor_lock = self.monitors.read().unwrap(); if let Some(monitor_state) = monitor_lock.get(funding_outpoint) { - if self.update_monitor_with_chain_data(header, txdata, &process, funding_outpoint, &monitor_state).is_err() { + if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() { // Take the monitors lock for writing so that we poison it and any future // operations going forward fail immediately. 
core::mem::drop(monitor_lock); @@ -278,7 +280,7 @@ where C::Target: chain::Filter, let monitor_states = self.monitors.write().unwrap(); for (funding_outpoint, monitor_state) in monitor_states.iter() { if !funding_outpoints.contains(funding_outpoint) { - if self.update_monitor_with_chain_data(header, txdata, &process, funding_outpoint, &monitor_state).is_err() { + if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() { log_error!(self.logger, "{}", err_str); panic!("{}", err_str); } @@ -297,14 +299,29 @@ where C::Target: chain::Filter, } fn update_monitor_with_chain_data( - &self, header: &Header, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint, - monitor_state: &MonitorHolder + &self, header: &Header, best_height: Option, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint, + monitor_state: &MonitorHolder, channel_count: usize, ) -> Result<(), ()> where FN: Fn(&ChannelMonitor, &TransactionData) -> Vec { let monitor = &monitor_state.monitor; let logger = WithChannelMonitor::from(&self.logger, &monitor, None); - let mut txn_outputs; - { - txn_outputs = process(monitor, txdata); + + let mut txn_outputs = process(monitor, txdata); + + let get_partition_key = |funding_outpoint: &OutPoint| { + let funding_txid_hash = funding_outpoint.txid.to_raw_hash(); + let funding_txid_hash_bytes = funding_txid_hash.as_byte_array(); + let funding_txid_u32 = u32::from_be_bytes([funding_txid_hash_bytes[0], funding_txid_hash_bytes[1], funding_txid_hash_bytes[2], funding_txid_hash_bytes[3]]); + funding_txid_u32.wrapping_add(best_height.unwrap_or_default()) + }; + + let partition_factor = if channel_count < 15 { + 5 + } else { + 50 // ~ 8hours + }; + + let has_pending_claims = monitor_state.monitor.has_pending_claims(); + if has_pending_claims || get_partition_key(funding_outpoint) % partition_factor == 0 { log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor)); match self.persister.update_persisted_channel(*funding_outpoint, None, monitor) { ChannelMonitorUpdateStatus::Completed => @@ -313,10 +330,10 @@ where C::Target: chain::Filter, ), ChannelMonitorUpdateStatus::InProgress => { log_trace!(logger, "Channel Monitor sync for channel {} in progress.", log_funding_info!(monitor)); - }, + } ChannelMonitorUpdateStatus::UnrecoverableError => { return Err(()); - }, + } } } @@ -870,14 +887,17 @@ impl ChannelMonitor { ); } + /// Returns true if the monitor has pending claim requests that are not fully confirmed yet. + pub fn has_pending_claims(&self) -> bool + { + self.inner.lock().unwrap().onchain_tx_handler.has_pending_claims() + } + /// Triggers rebroadcasts of pending claims from a force-closed channel after a transaction /// signature generation failure. pub fn signer_unblocked( @@ -1924,9 +1930,9 @@ impl ChannelMonitor { } #[cfg(test)] - pub fn do_signer_call ()>(&self, mut f: F) { - let inner = self.inner.lock().unwrap(); - f(&inner.onchain_tx_handler.signer); + pub fn do_mut_signer_call ()>(&self, mut f: F) { + let mut inner = self.inner.lock().unwrap(); + f(&mut inner.onchain_tx_handler.signer); } } diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index e5757cfd..07a75361 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -535,6 +535,13 @@ impl OnchainTxHandler { } } + /// Returns true if we are currently tracking any pending claim requests that are not fully + /// confirmed yet. 
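/// The `ChainMonitor` block-connection path above uses this as an override for its new
/// persistence partitioning: a monitor with pending claims is re-persisted on every
/// connected block, while idle monitors are only re-persisted when
/// `get_partition_key(funding_outpoint) % partition_factor == 0`, where the key is
/// `funding_txid_u32.wrapping_add(best_height)` and the factor is 5 for nodes with
/// fewer than 15 channels and 50 otherwise.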
+ pub(super) fn has_pending_claims(&self) -> bool + { + self.pending_claim_requests.len() != 0 + } + /// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty /// onchain) lays on the assumption of claim transactions getting confirmed before timelock /// expiration (CSV or CLTV following cases). In case of high-fee spikes, claim tx may get stuck diff --git a/lightning/src/events/mod.rs b/lightning/src/events/mod.rs index d717b7ff..e59a3d18 100644 --- a/lightning/src/events/mod.rs +++ b/lightning/src/events/mod.rs @@ -789,6 +789,8 @@ pub enum Event { /// If the recipient or an intermediate node misbehaves and gives us free money, this may /// overstate the amount paid, though this is unlikely. /// + /// This is only `None` for payments initiated on LDK versions prior to 0.0.103. + /// /// [`Route::get_total_fees`]: crate::routing::router::Route::get_total_fees fee_paid_msat: Option, }, diff --git a/lightning/src/ln/async_signer_tests.rs b/lightning/src/ln/async_signer_tests.rs index 8e929fe8..b6e5a222 100644 --- a/lightning/src/ln/async_signer_tests.rs +++ b/lightning/src/ln/async_signer_tests.rs @@ -20,6 +20,7 @@ use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureR use crate::ln::functional_test_utils::*; use crate::ln::msgs::ChannelMessageHandler; use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; +use crate::util::test_channel_signer::SignerOp; #[test] fn test_async_commitment_signature_for_funding_created() { @@ -43,7 +44,7 @@ fn test_async_commitment_signature_for_funding_created() { // But! Let's make node[0]'s signer be unavailable: we should *not* broadcast a funding_created // message... let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); - nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &temporary_channel_id, false); + nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &temporary_channel_id, SignerOp::SignCounterpartyCommitment); nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); check_added_monitors(&nodes[0], 0); @@ -57,7 +58,7 @@ fn test_async_commitment_signature_for_funding_created() { channels[0].channel_id }; - nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true); + nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); nodes[0].node.signer_unblocked(Some((nodes[1].node.get_our_node_id(), chan_id))); let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); @@ -98,7 +99,7 @@ fn test_async_commitment_signature_for_funding_signed() { // Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should // *not* broadcast a `funding_signed`... 
- nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, false); + nodes[1].disable_channel_signer_op(&nodes[0].node.get_our_node_id(), &temporary_channel_id, SignerOp::SignCounterpartyCommitment); nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); check_added_monitors(&nodes[1], 1); @@ -111,7 +112,7 @@ fn test_async_commitment_signature_for_funding_signed() { assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len()); channels[0].channel_id }; - nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &chan_id, true); + nodes[1].enable_channel_signer_op(&nodes[0].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id))); expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); @@ -152,14 +153,14 @@ fn test_async_commitment_signature_for_commitment_signed() { // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`. - dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, false); + dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors(dst, 1); get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id()); // Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`. - dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, true); + dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); let events = dst.node.get_and_clear_pending_msg_events(); @@ -215,7 +216,7 @@ fn test_async_commitment_signature_for_funding_signed_0conf() { // Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should // *not* broadcast a `funding_signed`... - nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, false); + nodes[1].disable_channel_signer_op(&nodes[0].node.get_our_node_id(), &temporary_channel_id, SignerOp::SignCounterpartyCommitment); nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); check_added_monitors(&nodes[1], 1); @@ -230,7 +231,7 @@ fn test_async_commitment_signature_for_funding_signed_0conf() { }; // At this point, we basically expect the channel to open like a normal zero-conf channel. - nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &chan_id, true); + nodes[1].enable_channel_signer_op(&nodes[0].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id))); let (funding_signed, channel_ready_1) = { @@ -299,7 +300,7 @@ fn test_async_commitment_signature_for_peer_disconnect() { // Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a // `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`. 
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, false); + dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg); check_added_monitors(dst, 1); @@ -314,7 +315,7 @@ fn test_async_commitment_signature_for_peer_disconnect() { reconnect_nodes(reconnect_args); // Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`. - dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, true); + dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment); dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id))); { @@ -366,7 +367,6 @@ fn do_test_async_holder_signatures(anchors: bool, remote_commitment: bool) { route_payment(&nodes[0], &[&nodes[1]], 1_000_000); let error_message = "Channel force-closed"; - nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, false); if remote_commitment { // Make the counterparty broadcast its latest commitment. @@ -375,6 +375,8 @@ fn do_test_async_holder_signatures(anchors: bool, remote_commitment: bool) { check_closed_broadcast(&nodes[1], 1, true); check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100_000); } else { + nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderCommitment); + nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderHtlcTransaction); // We'll connect blocks until the sender has to go onchain to time out the HTLC. connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1); @@ -383,7 +385,8 @@ fn do_test_async_holder_signatures(anchors: bool, remote_commitment: bool) { assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); // Mark it as available now, we should see the signed commitment transaction. - nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true); + nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderCommitment); + nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderHtlcTransaction); get_monitor!(nodes[0], chan_id).signer_unblocked(nodes[0].tx_broadcaster, nodes[0].fee_estimator, &nodes[0].logger); } @@ -409,7 +412,13 @@ fn do_test_async_holder_signatures(anchors: bool, remote_commitment: bool) { // Mark it as unavailable again to now test the HTLC transaction. We'll mine the commitment such // that the HTLC transaction is retried. 
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, false); + let sign_htlc_op = if remote_commitment { + SignerOp::SignCounterpartyHtlcTransaction + } else { + SignerOp::SignHolderHtlcTransaction + }; + nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderCommitment); + nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, sign_htlc_op); mine_transaction(&nodes[0], &commitment_tx); check_added_monitors(&nodes[0], 1); @@ -426,10 +435,12 @@ fn do_test_async_holder_signatures(anchors: bool, remote_commitment: bool) { if anchors && !remote_commitment { handle_bump_htlc_event(&nodes[0], 1); } - assert!(nodes[0].tx_broadcaster.txn_broadcast().is_empty()); + let txn = nodes[0].tx_broadcaster.txn_broadcast(); + assert!(txn.is_empty(), "expected no transaction to be broadcast, got {:?}", txn); // Mark it as available now, we should see the signed HTLC transaction. - nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true); + nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderCommitment); + nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, sign_htlc_op); get_monitor!(nodes[0], chan_id).signer_unblocked(nodes[0].tx_broadcaster, nodes[0].fee_estimator, &nodes[0].logger); if anchors && !remote_commitment { @@ -443,9 +454,21 @@ fn do_test_async_holder_signatures(anchors: bool, remote_commitment: bool) { } #[test] -fn test_async_holder_signatures() { +fn test_async_holder_signatures_no_anchors() { do_test_async_holder_signatures(false, false); +} + +#[test] +fn test_async_holder_signatures_remote_commitment_no_anchors() { do_test_async_holder_signatures(false, true); +} + +#[test] +fn test_async_holder_signatures_anchors() { do_test_async_holder_signatures(true, false); +} + +#[test] +fn test_async_holder_signatures_remote_commitment_anchors() { do_test_async_holder_signatures(true, true); } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 49360763..ce07cb73 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -1612,7 +1612,10 @@ fn test_monitor_update_fail_claim() { let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + // As long as the preimage isn't on-chain, we shouldn't expose the `PaymentClaimed` event to + // users nor send the preimage to peers in the new commitment update. nodes[1].node.claim_funds(payment_preimage_1); + assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a9f8ff00..4d166cd6 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2118,8 +2118,8 @@ impl ChannelContext where SP::Target: SignerProvider { /// Returns the holder signer for this channel. 
#[cfg(test)] - pub fn get_signer(&self) -> &ChannelSignerType { - return &self.holder_signer + pub fn get_mut_signer(&mut self) -> &mut ChannelSignerType { + return &mut self.holder_signer } /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0, @@ -2146,6 +2146,143 @@ impl ChannelContext where SP::Target: SignerProvider { } } + /// Performs checks against necessary constraints after receiving either an `accept_channel` or + /// `accept_channel2` message. + pub fn do_accept_channel_checks( + &mut self, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures, + common_fields: &msgs::CommonAcceptChannelFields, channel_reserve_satoshis: u64, + ) -> Result<(), ChannelError> { + let peer_limits = if let Some(ref limits) = self.inbound_handshake_limits_override { limits } else { default_limits }; + + // Check sanity of message fields: + if !self.is_outbound() { + return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned())); + } + if !matches!(self.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) { + return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned())); + } + if common_fields.dust_limit_satoshis > 21000000 * 100000000 { + return Err(ChannelError::close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", common_fields.dust_limit_satoshis))); + } + if channel_reserve_satoshis > self.channel_value_satoshis { + return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", channel_reserve_satoshis, self.channel_value_satoshis))); + } + if common_fields.dust_limit_satoshis > self.holder_selected_channel_reserve_satoshis { + return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", common_fields.dust_limit_satoshis, self.holder_selected_channel_reserve_satoshis))); + } + if channel_reserve_satoshis > self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis { + return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})", + channel_reserve_satoshis, self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis))); + } + let full_channel_value_msat = (self.channel_value_satoshis - channel_reserve_satoshis) * 1000; + if common_fields.htlc_minimum_msat >= full_channel_value_msat { + return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", common_fields.htlc_minimum_msat, full_channel_value_msat))); + } + let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); + if common_fields.to_self_delay > max_delay_acceptable { + return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, common_fields.to_self_delay))); + } + if common_fields.max_accepted_htlcs < 1 { + return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned())); + } + if common_fields.max_accepted_htlcs > MAX_HTLCS { + return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", common_fields.max_accepted_htlcs, MAX_HTLCS))); + } + + // Now check against optional parameters as set by config... 
+ if common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat { + return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat))); + } + if common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat { + return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat))); + } + if channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis { + return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis))); + } + if common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs { + return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs))); + } + if common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); + } + if common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { + return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); + } + if common_fields.minimum_depth > peer_limits.max_minimum_depth { + return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, common_fields.minimum_depth))); + } + + if let Some(ty) = &common_fields.channel_type { + if *ty != self.channel_type { + return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned())); + } + } else if their_features.supports_channel_type() { + // Assume they've accepted the channel type as they said they understand it. + } else { + let channel_type = ChannelTypeFeatures::from_init(&their_features); + if channel_type != ChannelTypeFeatures::only_static_remote_key() { + return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); + } + self.channel_type = channel_type.clone(); + self.channel_transaction_parameters.channel_type_features = channel_type; + } + + let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { + match &common_fields.shutdown_scriptpubkey { + &Some(ref script) => { + // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything + if script.len() == 0 { + None + } else { + if !script::is_bolt2_compliant(&script, their_features) { + return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))); + } + Some(script.clone()) + } + }, + // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel + &None => { + return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out".to_owned())); + } + } + } else { None }; + + self.counterparty_dust_limit_satoshis = common_fields.dust_limit_satoshis; + self.counterparty_max_htlc_value_in_flight_msat = cmp::min(common_fields.max_htlc_value_in_flight_msat, self.channel_value_satoshis * 1000); + self.counterparty_selected_channel_reserve_satoshis = Some(channel_reserve_satoshis); + self.counterparty_htlc_minimum_msat = common_fields.htlc_minimum_msat; + self.counterparty_max_accepted_htlcs = common_fields.max_accepted_htlcs; + + if peer_limits.trust_own_funding_0conf { + self.minimum_depth = Some(common_fields.minimum_depth); + } else { + self.minimum_depth = Some(cmp::max(1, common_fields.minimum_depth)); + } + + let counterparty_pubkeys = ChannelPublicKeys { + funding_pubkey: common_fields.funding_pubkey, + revocation_basepoint: RevocationBasepoint::from(common_fields.revocation_basepoint), + payment_point: common_fields.payment_basepoint, + delayed_payment_basepoint: DelayedPaymentBasepoint::from(common_fields.delayed_payment_basepoint), + htlc_basepoint: HtlcBasepoint::from(common_fields.htlc_basepoint) + }; + + self.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters { + selected_contest_delay: common_fields.to_self_delay, + pubkeys: counterparty_pubkeys, + }); + + self.counterparty_cur_commitment_point = Some(common_fields.first_per_commitment_point); + self.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey; + + self.channel_state = ChannelState::NegotiatingFunding( + NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT + ); + self.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now. + + Ok(()) + } + /// Returns the block hash in which our funding transaction was confirmed. pub fn get_funding_tx_confirmed_in(&self) -> Option { self.funding_tx_confirmed_in @@ -2631,7 +2768,7 @@ impl ChannelContext where SP::Target: SignerProvider { feerate_per_kw = cmp::max(feerate_per_kw, feerate); } let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000); - cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value())) + cmp::max(feerate_per_kw.saturating_add(2530), feerate_plus_quarter.unwrap_or(u32::MAX)) } /// Get forwarding information for the counterparty. @@ -7498,136 +7635,11 @@ impl OutboundV1Channel where SP::Target: SignerProvider { } // Message handlers - pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> { - let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits }; - - // Check sanity of message fields: - if !self.context.is_outbound() { - return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned())); - } - if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) { - return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned())); - } - if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 { - return Err(ChannelError::close(format!("Peer never wants payout outputs? 
dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis))); - } - if msg.channel_reserve_satoshis > self.context.channel_value_satoshis { - return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis))); - } - if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis { - return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis))); - } - if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis { - return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})", - msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis))); - } - let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000; - if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat { - return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat))); - } - let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT); - if msg.common_fields.to_self_delay > max_delay_acceptable { - return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay))); - } - if msg.common_fields.max_accepted_htlcs < 1 { - return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned())); - } - if msg.common_fields.max_accepted_htlcs > MAX_HTLCS { - return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS))); - } - - // Now check against optional parameters as set by config... 
- if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat { - return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat))); - } - if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat { - return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat))); - } - if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis { - return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis))); - } - if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs { - return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs))); - } - if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS))); - } - if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS { - return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS))); - } - if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth { - return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth))); - } - - if let Some(ty) = &msg.common_fields.channel_type { - if *ty != self.context.channel_type { - return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned())); - } - } else if their_features.supports_channel_type() { - // Assume they've accepted the channel type as they said they understand it. - } else { - let channel_type = ChannelTypeFeatures::from_init(&their_features); - if channel_type != ChannelTypeFeatures::only_static_remote_key() { - return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned())); - } - self.context.channel_type = channel_type.clone(); - self.context.channel_transaction_parameters.channel_type_features = channel_type; - } - - let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() { - match &msg.common_fields.shutdown_scriptpubkey { - &Some(ref script) => { - // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything - if script.len() == 0 { - None - } else { - if !script::is_bolt2_compliant(&script, their_features) { - return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script))); - } - Some(script.clone()) - } - }, - // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel - &None => { - return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. 
Use 0-length script to opt-out".to_owned())); - } - } - } else { None }; - - self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis; - self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000); - self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis); - self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat; - self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs; - - if peer_limits.trust_own_funding_0conf { - self.context.minimum_depth = Some(msg.common_fields.minimum_depth); - } else { - self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth)); - } - - let counterparty_pubkeys = ChannelPublicKeys { - funding_pubkey: msg.common_fields.funding_pubkey, - revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint), - payment_point: msg.common_fields.payment_basepoint, - delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint), - htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint) - }; - - self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters { - selected_contest_delay: msg.common_fields.to_self_delay, - pubkeys: counterparty_pubkeys, - }); - - self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point); - self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey; - - self.context.channel_state = ChannelState::NegotiatingFunding( - NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT - ); - self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now. - - Ok(()) + pub fn accept_channel( + &mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, + their_features: &InitFeatures + ) -> Result<(), ChannelError> { + self.context.do_accept_channel_checks(default_limits, their_features, &msg.common_fields, msg.channel_reserve_satoshis) } /// Handles a funding_signed message from the remote end. 
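Illustration (not part of the patch): the hunks above move the accept_channel validation into ChannelContext::do_accept_channel_checks, keyed on msgs::CommonAcceptChannelFields plus the reserve value, so the same checks can be reused by an accept_channel2 handler for dual-funded channels. A rough sketch of such a caller follows; the get_v2_channel_reserve_satoshis helper and the exact reserve derivation are assumptions and are not part of this diff:

    // Sketch only: a V2 accept handler delegating to the shared checks. The V2 message
    // carries no explicit reserve, so one is derived locally here (derivation assumed).
    pub fn accept_channel2(
        &mut self, msg: &msgs::AcceptChannelV2, default_limits: &ChannelHandshakeLimits,
        their_features: &InitFeatures,
    ) -> Result<(), ChannelError> {
        let channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
            self.context.channel_value_satoshis, msg.common_fields.dust_limit_satoshis);
        self.context.do_accept_channel_checks(
            default_limits, their_features, &msg.common_fields, channel_reserve_satoshis)
    }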
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ea5150cb..d3550d10 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -66,6 +66,7 @@ use crate::offers::invoice_request::{DerivedPayerId, InvoiceRequestBuilder}; use crate::offers::offer::{Offer, OfferBuilder}; use crate::offers::parse::Bolt12SemanticError; use crate::offers::refund::{Refund, RefundBuilder}; +use crate::onion_message::async_payments::{AsyncPaymentsMessage, HeldHtlcAvailable, ReleaseHeldHtlc, AsyncPaymentsMessageHandler}; use crate::onion_message::messenger::{new_pending_onion_message, Destination, MessageRouter, PendingOnionMessage, Responder, ResponseInstruction}; use crate::onion_message::offers::{OffersMessage, OffersMessageHandler}; use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider}; @@ -4031,8 +4032,8 @@ where self.pending_outbound_payments .send_payment_for_bolt12_invoice( invoice, payment_id, &self.router, self.list_usable_channels(), - || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, - best_block_height, &self.logger, &self.pending_events, + || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self, + &self.secp_ctx, best_block_height, &self.logger, &self.pending_events, |args| self.send_payment_along_path(args) ) } @@ -4895,8 +4896,8 @@ where if short_chan_id != 0 { let mut forwarding_counterparty = None; macro_rules! forwarding_channel_not_found { - () => { - for forward_info in pending_forwards.drain(..) { + ($forward_infos: expr) => { + for forward_info in $forward_infos { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, @@ -5004,7 +5005,7 @@ where let (counterparty_node_id, forward_chan_id) = match chan_info_opt { Some((cp_id, chan_id)) => (cp_id, chan_id), None => { - forwarding_channel_not_found!(); + forwarding_channel_not_found!(pending_forwards.drain(..)); continue; } }; @@ -5012,96 +5013,148 @@ where let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id); if peer_state_mutex_opt.is_none() { - forwarding_channel_not_found!(); + forwarding_channel_not_found!(pending_forwards.drain(..)); continue; } let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; - if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { - let logger = WithChannelContext::from(&self.logger, &chan.context, None); - for forward_info in pending_forwards.drain(..) { - let queue_fail_htlc_res = match forward_info { - HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, - prev_user_channel_id, forward_info: PendingHTLCInfo { - incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, - routing: PendingHTLCRouting::Forward { - onion_packet, blinded, .. - }, skimmed_fee_msat, .. 
+ let mut draining_pending_forwards = pending_forwards.drain(..); + while let Some(forward_info) = draining_pending_forwards.next() { + let queue_fail_htlc_res = match forward_info { + HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, + prev_user_channel_id, forward_info: PendingHTLCInfo { + incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, + routing: PendingHTLCRouting::Forward { + ref onion_packet, blinded, .. + }, skimmed_fee_msat, .. + }, + }) => { + let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { + short_channel_id: prev_short_channel_id, + user_channel_id: Some(prev_user_channel_id), + channel_id: prev_channel_id, + outpoint: prev_funding_outpoint, + htlc_id: prev_htlc_id, + incoming_packet_shared_secret: incoming_shared_secret, + // Phantom payments are only PendingHTLCRouting::Receive. + phantom_shared_secret: None, + blinded_failure: blinded.map(|b| b.failure), + }); + let next_blinding_point = blinded.and_then(|b| { + let encrypted_tlvs_ss = self.node_signer.ecdh( + Recipient::Node, &b.inbound_blinding_point, None + ).unwrap().secret_bytes(); + onion_utils::next_hop_pubkey( + &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss + ).ok() + }); + + // Forward the HTLC over the most appropriate channel with the corresponding peer, + // applying non-strict forwarding. + // The channel with the least amount of outbound liquidity will be used to maximize the + // probability of being able to successfully forward a subsequent HTLC. + let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase { + ChannelPhase::Funded(chan) => { + let balances = chan.context.get_available_balances(&self.fee_estimator); + if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat && + outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat && + chan.context.is_usable() { + Some((chan, balances)) + } else { + None + } }, - }) => { - let logger = WithChannelContext::from(&self.logger, &chan.context, Some(payment_hash)); - log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id); - let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { - short_channel_id: prev_short_channel_id, - user_channel_id: Some(prev_user_channel_id), - channel_id: prev_channel_id, - outpoint: prev_funding_outpoint, - htlc_id: prev_htlc_id, - incoming_packet_shared_secret: incoming_shared_secret, - // Phantom payments are only PendingHTLCRouting::Receive. 
- phantom_shared_secret: None, - blinded_failure: blinded.map(|b| b.failure), - }); - let next_blinding_point = blinded.and_then(|b| { - let encrypted_tlvs_ss = self.node_signer.ecdh( - Recipient::Node, &b.inbound_blinding_point, None - ).unwrap().secret_bytes(); - onion_utils::next_hop_pubkey( - &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss - ).ok() - }); - if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat, - payment_hash, outgoing_cltv_value, htlc_source.clone(), - onion_packet, skimmed_fee_msat, next_blinding_point, &self.fee_estimator, - &&logger) - { - if let ChannelError::Ignore(msg) = e { - log_trace!(logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg); + _ => None, + }).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c); + let optimal_channel = match maybe_optimal_channel { + Some(chan) => chan, + None => { + // Fall back to the specified channel to return an appropriate error. + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + chan } else { - panic!("Stated return value requirements in send_htlc() were not met"); + forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + break; } + } + }; + + let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash)); + let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) { + "specified" + } else { + "alternate" + }; + log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}", + prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id); + if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat, + payment_hash, outgoing_cltv_value, htlc_source.clone(), + onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator, + &&logger) + { + if let ChannelError::Ignore(msg) = e { + log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg); + } else { + panic!("Stated return value requirements in send_htlc() were not met"); + } + + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan); failed_forwards.push((htlc_source, payment_hash, HTLCFailReason::reason(failure_code, data), HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id } )); - continue; + } else { + forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + break; } - None - }, - HTLCForwardInfo::AddHTLC { .. } => { - panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); - }, - HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => { + } + None + }, + HTLCForwardInfo::AddHTLC { .. 
} => { + panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward"); + }, + HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => { + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); - Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id)) - }, - HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => { + Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id)) + } else { + forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + break; + } + }, + HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => { + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id); let res = chan.queue_fail_malformed_htlc( htlc_id, failure_code, sha256_of_onion, &&logger ); Some((res, htlc_id)) - }, - }; - if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res { - if let Err(e) = queue_fail_htlc_res { - if let ChannelError::Ignore(msg) = e { + } else { + forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards)); + break; + } + }, + }; + if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res { + if let Err(e) = queue_fail_htlc_res { + if let ChannelError::Ignore(msg) = e { + if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) { + let logger = WithChannelContext::from(&self.logger, &chan.context, None); log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg); - } else { - panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met"); } - // fail-backs are best-effort, we probably already have one - // pending, and if not that's OK, if not, the channel is on - // the chain and sending the HTLC-Timeout is their problem. - continue; + } else { + panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met"); } + // fail-backs are best-effort, we probably already have one + // pending, and if not that's OK, if not, the channel is on + // the chain and sending the HTLC-Timeout is their problem. + continue; } } - } else { - forwarding_channel_not_found!(); - continue; } } else { 'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) { @@ -6159,21 +6212,13 @@ where } if valid_mpp { for htlc in sources.drain(..) { - let prev_hop_chan_id = htlc.prev_hop.channel_id; - if let Err((pk, err)) = self.claim_funds_from_hop( + self.claim_funds_from_hop( htlc.prev_hop, payment_preimage, |_, definitely_duplicate| { debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment"); Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }) } - ) { - if let msgs::ErrorAction::IgnoreError = err.err.action { - // We got a temporary failure updating monitor, but will claim the - // HTLC when the monitor updating is restored (or on chain). 
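Illustration (not part of the patch): the forwarding rewrite above applies non-strict forwarding, as its comments describe: rather than requiring the exact channel named by the onion, the HTLC may be sent over any usable channel to the same next-hop peer, preferring the candidate with the least outbound liquidity so that larger channels stay available for later HTLCs. A standalone sketch of that selection rule, with a simplified, illustrative Candidate type standing in for the per-channel balances:

    // Filter to usable channels that can carry the amount, then take the one with the
    // smallest remaining outbound HTLC limit.
    struct Candidate {
        usable: bool,
        next_outbound_htlc_limit_msat: u64,
        next_outbound_htlc_minimum_msat: u64,
    }

    fn pick_forwarding_channel(candidates: &[Candidate], outgoing_amt_msat: u64) -> Option<usize> {
        candidates
            .iter()
            .enumerate()
            .filter(|(_, c)| c.usable
                && outgoing_amt_msat <= c.next_outbound_htlc_limit_msat
                && outgoing_amt_msat >= c.next_outbound_htlc_minimum_msat)
            .min_by_key(|(_, c)| c.next_outbound_htlc_limit_msat)
            .map(|(idx, _)| idx)
    }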
- let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id), Some(payment_hash)); - log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err); - } else { errs.push((pk, err)); } - } + ); } } if !valid_mpp { @@ -6195,9 +6240,10 @@ where } } - fn claim_funds_from_hop, bool) -> Option>(&self, - prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc) - -> Result<(), (PublicKey, MsgHandleErrInternal)> { + fn claim_funds_from_hop, bool) -> Option>( + &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, + completion_action: ComplFunc, + ) { //TODO: Delay the claimed_funds relaying just like we do outbound relay! // If we haven't yet run background events assume we're still deserializing and shouldn't @@ -6259,7 +6305,7 @@ where let action = if let Some(action) = completion_action(None, true) { action } else { - return Ok(()); + return; }; mem::drop(peer_state_lock); @@ -6275,7 +6321,7 @@ where } else { debug_assert!(false, "Duplicate claims should always free another channel immediately"); - return Ok(()); + return; }; if let Some(peer_state_mtx) = per_peer_state.get(&node_id) { let mut peer_state = peer_state_mtx.lock().unwrap(); @@ -6300,7 +6346,7 @@ where } } } - return Ok(()); + return; } } } @@ -6348,7 +6394,6 @@ where // generally always allowed to be duplicative (and it's specifically noted in // `PaymentForwarded`). self.handle_monitor_update_completion_actions(completion_action(None, false)); - Ok(()) } fn finalize_claims(&self, sources: Vec) { @@ -6381,7 +6426,7 @@ where let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); #[cfg(debug_assertions)] let claiming_chan_funding_outpoint = hop_data.outpoint; - let res = self.claim_funds_from_hop(hop_data, payment_preimage, + self.claim_funds_from_hop(hop_data, payment_preimage, |htlc_claim_value_msat, definitely_duplicate| { let chan_to_release = if let Some(node_id) = next_channel_counterparty_node_id { @@ -6475,10 +6520,6 @@ where }) } }); - if let Err((pk, err)) = res { - let result: Result<(), _> = Err(err); - let _ = handle_error!(self, result, pk); - } }, } } @@ -10337,6 +10378,17 @@ where }, } }, + #[cfg(async_payments)] + OffersMessage::StaticInvoice(_invoice) => { + match responder { + Some(responder) => { + responder.respond(OffersMessage::InvoiceError( + InvoiceError::from_string("Static invoices not yet supported".to_string()) + )) + }, + None => return ResponseInstruction::NoResponse, + } + }, OffersMessage::InvoiceError(invoice_error) => { log_trace!(self.logger, "Received invoice_error: {}", invoice_error); ResponseInstruction::NoResponse @@ -10349,6 +10401,31 @@ where } } +impl +AsyncPaymentsMessageHandler for ChannelManager +where + M::Target: chain::Watch<::EcdsaSigner>, + T::Target: BroadcasterInterface, + ES::Target: EntropySource, + NS::Target: NodeSigner, + SP::Target: SignerProvider, + F::Target: FeeEstimator, + R::Target: Router, + L::Target: Logger, +{ + fn held_htlc_available( + &self, _message: HeldHtlcAvailable, _responder: Option + ) -> ResponseInstruction { + ResponseInstruction::NoResponse + } + + fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {} + + fn release_pending_messages(&self) -> Vec> { + Vec::new() + } +} + impl NodeIdLookUp for ChannelManager where diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index cfec6fb1..7f1ac422 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ 
b/lightning/src/ln/functional_test_utils.rs @@ -31,6 +31,8 @@ use crate::util::errors::APIError; use crate::util::logger::Logger; use crate::util::scid_utils; use crate::util::test_channel_signer::TestChannelSigner; +#[cfg(test)] +use crate::util::test_channel_signer::SignerOp; use crate::util::test_utils; use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface}; use crate::util::ser::{ReadableArgs, Writeable}; @@ -420,6 +422,7 @@ type TestOnionMessenger<'chan_man, 'node_cfg, 'chan_mon_cfg> = OnionMessenger< &'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>, &'node_cfg test_utils::TestMessageRouter<'chan_mon_cfg>, &'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>, + &'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>, IgnoringMessageHandler, >; @@ -482,46 +485,74 @@ impl<'a, 'b, 'c> Node<'a, 'b, 'c> { pub fn get_block_header(&self, height: u32) -> Header { self.blocks.lock().unwrap()[height as usize].0.header } - /// Changes the channel signer's availability for the specified peer and channel. + + /// Toggles this node's signer to be available for the given signer operation. + /// This is useful for testing behavior for restoring an async signer that previously + /// could not return a signature immediately. + #[cfg(test)] + pub fn enable_channel_signer_op(&self, peer_id: &PublicKey, chan_id: &ChannelId, signer_op: SignerOp) { + self.set_channel_signer_ops(peer_id, chan_id, signer_op, true); + } + + /// Toggles this node's signer to be unavailable, returning `Err` for the given signer operation. + /// This is useful for testing behavior for an async signer that cannot return a signature + /// immediately. + #[cfg(test)] + pub fn disable_channel_signer_op(&self, peer_id: &PublicKey, chan_id: &ChannelId, signer_op: SignerOp) { + self.set_channel_signer_ops(peer_id, chan_id, signer_op, false); + } + + /// Changes the channel signer's availability for the specified peer, channel, and signer + /// operation. /// - /// When `available` is set to `true`, the channel signer will behave normally. When set to - /// `false`, the channel signer will act like an off-line remote signer and will return `Err` for - /// several of the signing methods. Currently, only `get_per_commitment_point` and - /// `release_commitment_secret` are affected by this setting. + /// For the specified signer operation, when `available` is set to `true`, the channel signer + /// will behave normally, returning `Ok`. When set to `false`, and the channel signer will + /// act like an off-line remote signer, returning `Err`. This applies to the signer in all + /// relevant places, i.e. the channel manager, chain monitor, and the keys manager. 
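Illustration (not part of the patch): the helpers documented above replace the previous all-or-nothing set_channel_signer_available with per-operation toggles, which is what lets the async-signer tests block exactly one signature at a time. Their typical use, distilled from the tests earlier in this patch (node and channel setup elided; identifiers as in those tests):

    // Block only the counterparty-commitment signature; other signer operations keep working.
    dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
    dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg);
    // ...dst can still send revoke_and_ack, but withholds its own commitment_signed...

    // Re-enable the operation and nudge the channel to retry the blocked signature.
    dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
    dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));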
#[cfg(test)] - pub fn set_channel_signer_available(&self, peer_id: &PublicKey, chan_id: &ChannelId, available: bool) { + fn set_channel_signer_ops(&self, peer_id: &PublicKey, chan_id: &ChannelId, signer_op: SignerOp, available: bool) { use crate::sign::ChannelSigner; log_debug!(self.logger, "Setting channel signer for {} as available={}", chan_id, available); let per_peer_state = self.node.per_peer_state.read().unwrap(); - let chan_lock = per_peer_state.get(peer_id).unwrap().lock().unwrap(); + let mut chan_lock = per_peer_state.get(peer_id).unwrap().lock().unwrap(); let mut channel_keys_id = None; - if let Some(chan) = chan_lock.channel_by_id.get(chan_id).map(|phase| phase.context()) { - chan.get_signer().as_ecdsa().unwrap().set_available(available); + if let Some(chan) = chan_lock.channel_by_id.get_mut(chan_id).map(|phase| phase.context_mut()) { + let signer = chan.get_mut_signer().as_mut_ecdsa().unwrap(); + if available { + signer.enable_op(signer_op); + } else { + signer.disable_op(signer_op); + } channel_keys_id = Some(chan.channel_keys_id); } - let mut monitor = None; - for (funding_txo, channel_id) in self.chain_monitor.chain_monitor.list_monitors() { - if *chan_id == channel_id { - monitor = self.chain_monitor.chain_monitor.get_monitor(funding_txo).ok(); - } - } + let monitor = self.chain_monitor.chain_monitor.list_monitors().into_iter() + .find(|(_, channel_id)| *channel_id == *chan_id) + .and_then(|(funding_txo, _)| self.chain_monitor.chain_monitor.get_monitor(funding_txo).ok()); if let Some(monitor) = monitor { - monitor.do_signer_call(|signer| { + monitor.do_mut_signer_call(|signer| { channel_keys_id = channel_keys_id.or(Some(signer.inner.channel_keys_id())); - signer.set_available(available) + if available { + signer.enable_op(signer_op); + } else { + signer.disable_op(signer_op); + } }); } + let channel_keys_id = channel_keys_id.unwrap(); + let mut unavailable_signers_ops = self.keys_manager.unavailable_signers_ops.lock().unwrap(); + let entry = unavailable_signers_ops.entry(channel_keys_id).or_insert(new_hash_set()); if available { - self.keys_manager.unavailable_signers.lock().unwrap() - .remove(channel_keys_id.as_ref().unwrap()); + entry.remove(&signer_op); + if entry.is_empty() { + unavailable_signers_ops.remove(&channel_keys_id); + } } else { - self.keys_manager.unavailable_signers.lock().unwrap() - .insert(channel_keys_id.unwrap()); - } + entry.insert(signer_op); + }; } } @@ -3228,7 +3259,7 @@ pub fn create_network<'a, 'b: 'a, 'c: 'b>(node_count: usize, cfgs: &'b Vec Weight { - let inputs_weight = self.inputs.iter().fold( - Weight::from_wu(0), - |weight, InteractiveTxInput { prev_output, .. }| { - weight.checked_add(estimate_input_weight(prev_output)).unwrap_or(Weight::MAX) - }, - ); - let outputs_weight = self.outputs.iter().fold( - Weight::from_wu(0), - |weight, InteractiveTxOutput { tx_out, .. 
}| { - weight.checked_add(get_output_weight(&tx_out.script_pubkey)).unwrap_or(Weight::MAX) - }, - ); + let inputs_weight = self.inputs.iter().fold(Weight::from_wu(0), |weight, input| { + weight.checked_add(estimate_input_weight(input.prev_output())).unwrap_or(Weight::MAX) + }); + let outputs_weight = self.outputs.iter().fold(Weight::from_wu(0), |weight, output| { + weight.checked_add(get_output_weight(&output.script_pubkey())).unwrap_or(Weight::MAX) + }); Weight::from_wu(TX_COMMON_FIELDS_WEIGHT) .checked_add(inputs_weight) .and_then(|weight| weight.checked_add(outputs_weight)) @@ -187,13 +169,12 @@ impl ConstructedTransaction { // Inputs and outputs must be sorted by serial_id let ConstructedTransaction { mut inputs, mut outputs, .. } = self; - inputs.sort_unstable_by_key(|InteractiveTxInput { serial_id, .. }| *serial_id); - outputs.sort_unstable_by_key(|InteractiveTxOutput { serial_id, .. }| *serial_id); + inputs.sort_unstable_by_key(|input| input.serial_id()); + outputs.sort_unstable_by_key(|output| output.serial_id); - let input: Vec = - inputs.into_iter().map(|InteractiveTxInput { input, .. }| input).collect(); + let input: Vec = inputs.into_iter().map(|input| input.txin().clone()).collect(); let output: Vec = - outputs.into_iter().map(|InteractiveTxOutput { tx_out, .. }| tx_out).collect(); + outputs.into_iter().map(|output| output.tx_out().clone()).collect(); Transaction { version: Version::TWO, lock_time: self.lock_time, input, output } } @@ -205,9 +186,25 @@ struct NegotiationContext { received_tx_add_input_count: u16, received_tx_add_output_count: u16, inputs: HashMap, + /// The output script intended to be the new funding output script. + /// The script pubkey is used to determine which output is the funding output. + /// When an output with the same script pubkey is added by any of the nodes, it will be + /// treated as the shared output. + /// The value is the holder's intended contribution to the shared funding output. + /// The rest is the counterparty's contribution. + /// When the funding output is added (recognized by its output script pubkey), it will be marked + /// as shared, and split between the peers according to the local value. + /// If the local value is found to be larger than the actual funding output, an error is generated. + expected_shared_funding_output: (ScriptBuf, u64), + /// The actual new funding output, set only after the output has actually been added. + /// NOTE: this output is also included in `outputs`. + actual_new_funding_output: Option, prevtx_outpoints: HashSet, + /// The outputs added so far. outputs: HashMap, + /// The locktime of the funding transaction. 
tx_locktime: AbsoluteLockTime, + /// The fee rate used for the transaction feerate_sat_per_kw: u32, } @@ -236,26 +233,51 @@ fn is_serial_id_valid_for_counterparty(holder_is_initiator: bool, serial_id: &Se } impl NegotiationContext { + fn new( + holder_is_initiator: bool, expected_shared_funding_output: (ScriptBuf, u64), + tx_locktime: AbsoluteLockTime, feerate_sat_per_kw: u32, + ) -> Self { + NegotiationContext { + holder_is_initiator, + received_tx_add_input_count: 0, + received_tx_add_output_count: 0, + inputs: new_hash_map(), + expected_shared_funding_output, + actual_new_funding_output: None, + prevtx_outpoints: new_hash_set(), + outputs: new_hash_map(), + tx_locktime, + feerate_sat_per_kw, + } + } + + fn set_actual_new_funding_output( + &mut self, tx_out: TxOut, + ) -> Result { + if self.actual_new_funding_output.is_some() { + return Err(AbortReason::DuplicateFundingOutput); + } + let value = tx_out.value.to_sat(); + let local_owned = self.expected_shared_funding_output.1; + // Sanity check + if local_owned > value { + return Err(AbortReason::InvalidLowFundingOutputValue); + } + let shared_output = SharedOwnedOutput::new(tx_out, local_owned); + self.actual_new_funding_output = Some(shared_output.clone()); + Ok(shared_output) + } + fn is_serial_id_valid_for_counterparty(&self, serial_id: &SerialId) -> bool { is_serial_id_valid_for_counterparty(self.holder_is_initiator, serial_id) } fn remote_inputs_value(&self) -> u64 { - self.inputs - .iter() - .filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id)) - .fold(0u64, |acc, (_, InteractiveTxInput { prev_output, .. })| { - acc.saturating_add(prev_output.value.to_sat()) - }) + self.inputs.iter().fold(0u64, |acc, (_, input)| acc.saturating_add(input.remote_value())) } fn remote_outputs_value(&self) -> u64 { - self.outputs - .iter() - .filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id)) - .fold(0u64, |acc, (_, InteractiveTxOutput { tx_out, .. })| { - acc.saturating_add(tx_out.value.to_sat()) - }) + self.outputs.iter().fold(0u64, |acc, (_, output)| acc.saturating_add(output.remote_value())) } fn remote_inputs_weight(&self) -> Weight { @@ -263,8 +285,8 @@ impl NegotiationContext { self.inputs .iter() .filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id)) - .fold(0u64, |weight, (_, InteractiveTxInput { prev_output, .. })| { - weight.saturating_add(estimate_input_weight(prev_output).to_wu()) + .fold(0u64, |weight, (_, input)| { + weight.saturating_add(estimate_input_weight(input.prev_output()).to_wu()) }), ) } @@ -274,8 +296,8 @@ impl NegotiationContext { self.outputs .iter() .filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id)) - .fold(0u64, |weight, (_, InteractiveTxOutput { tx_out, .. })| { - weight.saturating_add(get_output_weight(&tx_out.script_pubkey).to_wu()) + .fold(0u64, |weight, (_, output)| { + weight.saturating_add(get_output_weight(&output.script_pubkey()).to_wu()) }), ) } @@ -347,7 +369,7 @@ impl NegotiationContext { }, hash_map::Entry::Vacant(entry) => { let prev_outpoint = OutPoint { txid, vout: msg.prevtx_out }; - entry.insert(InteractiveTxInput { + entry.insert(InteractiveTxInput::Remote(LocalOrRemoteInput { serial_id: msg.serial_id, input: TxIn { previous_output: prev_outpoint, @@ -355,7 +377,7 @@ impl NegotiationContext { ..Default::default() }, prev_output: prev_out, - }); + })); self.prevtx_outpoints.insert(prev_outpoint); Ok(()) }, @@ -404,7 +426,7 @@ impl NegotiationContext { // bitcoin supply. 
let mut outputs_value: u64 = 0; for output in self.outputs.iter() { - outputs_value = outputs_value.saturating_add(output.1.tx_out.value.to_sat()); + outputs_value = outputs_value.saturating_add(output.1.value()); } if outputs_value.saturating_add(msg.sats) > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // The receiving node: @@ -433,6 +455,23 @@ impl NegotiationContext { return Err(AbortReason::InvalidOutputScript); } + let txout = TxOut { value: Amount::from_sat(msg.sats), script_pubkey: msg.script.clone() }; + let is_shared = msg.script == self.expected_shared_funding_output.0; + let output = if is_shared { + // this is a shared funding output + let shared_output = self.set_actual_new_funding_output(txout)?; + InteractiveTxOutput { + serial_id: msg.serial_id, + added_by: AddingRole::Remote, + output: OutputOwned::Shared(shared_output), + } + } else { + InteractiveTxOutput { + serial_id: msg.serial_id, + added_by: AddingRole::Remote, + output: OutputOwned::Single(txout), + } + }; match self.outputs.entry(msg.serial_id) { hash_map::Entry::Occupied(_) => { // The receiving node: @@ -441,13 +480,7 @@ impl NegotiationContext { Err(AbortReason::DuplicateSerialId) }, hash_map::Entry::Vacant(entry) => { - entry.insert(InteractiveTxOutput { - serial_id: msg.serial_id, - tx_out: TxOut { - value: Amount::from_sat(msg.sats), - script_pubkey: msg.script.clone(), - }, - }); + entry.insert(output); Ok(()) }, } @@ -470,35 +503,45 @@ impl NegotiationContext { fn sent_tx_add_input(&mut self, msg: &msgs::TxAddInput) -> Result<(), AbortReason> { let tx = msg.prevtx.as_transaction(); - let input = TxIn { + let txin = TxIn { previous_output: OutPoint { txid: tx.txid(), vout: msg.prevtx_out }, sequence: Sequence(msg.sequence), ..Default::default() }; - let prev_output = - tx.output.get(msg.prevtx_out as usize).ok_or(AbortReason::PrevTxOutInvalid)?.clone(); - if !self.prevtx_outpoints.insert(input.previous_output) { + if !self.prevtx_outpoints.insert(txin.previous_output.clone()) { // We have added an input that already exists return Err(AbortReason::PrevTxOutInvalid); } - self.inputs.insert( - msg.serial_id, - InteractiveTxInput { serial_id: msg.serial_id, input, prev_output }, - ); + let vout = txin.previous_output.vout as usize; + let prev_output = tx.output.get(vout).ok_or(AbortReason::PrevTxOutInvalid)?.clone(); + let input = InteractiveTxInput::Local(LocalOrRemoteInput { + serial_id: msg.serial_id, + input: txin, + prev_output, + }); + self.inputs.insert(msg.serial_id, input); Ok(()) } fn sent_tx_add_output(&mut self, msg: &msgs::TxAddOutput) -> Result<(), AbortReason> { - self.outputs.insert( - msg.serial_id, + let txout = TxOut { value: Amount::from_sat(msg.sats), script_pubkey: msg.script.clone() }; + let is_shared = msg.script == self.expected_shared_funding_output.0; + let output = if is_shared { + // this is a shared funding output + let shared_output = self.set_actual_new_funding_output(txout)?; InteractiveTxOutput { serial_id: msg.serial_id, - tx_out: TxOut { - value: Amount::from_sat(msg.sats), - script_pubkey: msg.script.clone(), - }, - }, - ); + added_by: AddingRole::Local, + output: OutputOwned::Shared(shared_output), + } + } else { + InteractiveTxOutput { + serial_id: msg.serial_id, + added_by: AddingRole::Local, + output: OutputOwned::Single(txout), + } + }; + self.outputs.insert(msg.serial_id, output); Ok(()) } @@ -554,6 +597,10 @@ impl NegotiationContext { return Err(AbortReason::ExceededNumberOfInputsOrOutputs); } + if self.actual_new_funding_output.is_none() { + return 
Err(AbortReason::MissingFundingOutput); + } + // - the peer's paid feerate does not meet or exceed the agreed feerate (based on the minimum fee). self.check_counterparty_fees(remote_inputs_value.saturating_sub(remote_outputs_value))?; @@ -762,17 +809,16 @@ macro_rules! define_state_machine_transitions { } impl StateMachine { - fn new(feerate_sat_per_kw: u32, is_initiator: bool, tx_locktime: AbsoluteLockTime) -> Self { - let context = NegotiationContext { + fn new( + feerate_sat_per_kw: u32, is_initiator: bool, tx_locktime: AbsoluteLockTime, + expected_shared_funding_output: (ScriptBuf, u64), + ) -> Self { + let context = NegotiationContext::new( + is_initiator, + expected_shared_funding_output, tx_locktime, - holder_is_initiator: is_initiator, - received_tx_add_input_count: 0, - received_tx_add_output_count: 0, - inputs: new_hash_map(), - prevtx_outpoints: new_hash_set(), - outputs: new_hash_map(), feerate_sat_per_kw, - }; + ); if is_initiator { Self::ReceivedChangeMsg(ReceivedChangeMsg(context)) } else { @@ -831,11 +877,182 @@ impl StateMachine { ]); } +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum AddingRole { + Local, + Remote, +} + +/// Represents an input -- local or remote (both have the same fields) +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct LocalOrRemoteInput { + serial_id: SerialId, + input: TxIn, + prev_output: TxOut, +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum InteractiveTxInput { + Local(LocalOrRemoteInput), + Remote(LocalOrRemoteInput), + // TODO(splicing) SharedInput should be added +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct SharedOwnedOutput { + tx_out: TxOut, + local_owned: u64, +} + +impl SharedOwnedOutput { + fn new(tx_out: TxOut, local_owned: u64) -> SharedOwnedOutput { + debug_assert!( + local_owned <= tx_out.value.to_sat(), + "SharedOwnedOutput: Inconsistent local_owned value {}, larger than output value {}", + local_owned, + tx_out.value + ); + SharedOwnedOutput { tx_out, local_owned } + } + + fn remote_owned(&self) -> u64 { + self.tx_out.value.to_sat().saturating_sub(self.local_owned) + } +} + +/// Represents an output, with information about +/// its control -- exclusive by the adder or shared --, and +/// its ownership -- value fully owned by the adder or jointly +#[derive(Clone, Debug, Eq, PartialEq)] +pub enum OutputOwned { + /// Belongs to local node -- controlled exclusively and fully belonging to local node + Single(TxOut), + /// Output with shared control, but fully belonging to local node + SharedControlFullyOwned(TxOut), + /// Output with shared control and joint ownership + Shared(SharedOwnedOutput), +} + +impl OutputOwned { + fn tx_out(&self) -> &TxOut { + match self { + OutputOwned::Single(tx_out) | OutputOwned::SharedControlFullyOwned(tx_out) => tx_out, + OutputOwned::Shared(output) => &output.tx_out, + } + } + + fn value(&self) -> u64 { + self.tx_out().value.to_sat() + } + + fn is_shared(&self) -> bool { + match self { + OutputOwned::Single(_) => false, + OutputOwned::SharedControlFullyOwned(_) => true, + OutputOwned::Shared(_) => true, + } + } + + fn local_value(&self, local_role: AddingRole) -> u64 { + match self { + OutputOwned::Single(tx_out) | OutputOwned::SharedControlFullyOwned(tx_out) => { + match local_role { + AddingRole::Local => tx_out.value.to_sat(), + AddingRole::Remote => 0, + } + }, + OutputOwned::Shared(output) => output.local_owned, + } + } + + fn remote_value(&self, local_role: AddingRole) -> u64 { + match self { + OutputOwned::Single(tx_out) | OutputOwned::SharedControlFullyOwned(tx_out) => 
{ + match local_role { + AddingRole::Local => 0, + AddingRole::Remote => tx_out.value.to_sat(), + } + }, + OutputOwned::Shared(output) => output.remote_owned(), + } + } +} + +#[derive(Clone, Debug, Eq, PartialEq)] +pub struct InteractiveTxOutput { + serial_id: SerialId, + added_by: AddingRole, + output: OutputOwned, +} + +impl InteractiveTxOutput { + fn tx_out(&self) -> &TxOut { + self.output.tx_out() + } + + fn value(&self) -> u64 { + self.tx_out().value.to_sat() + } + + fn local_value(&self) -> u64 { + self.output.local_value(self.added_by) + } + + fn remote_value(&self) -> u64 { + self.output.remote_value(self.added_by) + } + + fn script_pubkey(&self) -> &ScriptBuf { + &self.output.tx_out().script_pubkey + } +} + +impl InteractiveTxInput { + pub fn serial_id(&self) -> SerialId { + match self { + InteractiveTxInput::Local(input) => input.serial_id, + InteractiveTxInput::Remote(input) => input.serial_id, + } + } + + pub fn txin(&self) -> &TxIn { + match self { + InteractiveTxInput::Local(input) => &input.input, + InteractiveTxInput::Remote(input) => &input.input, + } + } + + pub fn prev_output(&self) -> &TxOut { + match self { + InteractiveTxInput::Local(input) => &input.prev_output, + InteractiveTxInput::Remote(input) => &input.prev_output, + } + } + + pub fn value(&self) -> u64 { + self.prev_output().value.to_sat() + } + + pub fn local_value(&self) -> u64 { + match self { + InteractiveTxInput::Local(input) => input.prev_output.value.to_sat(), + InteractiveTxInput::Remote(_input) => 0, + } + } + + pub fn remote_value(&self) -> u64 { + match self { + InteractiveTxInput::Local(_input) => 0, + InteractiveTxInput::Remote(input) => input.prev_output.value.to_sat(), + } + } +} + pub(crate) struct InteractiveTxConstructor { state_machine: StateMachine, channel_id: ChannelId, inputs_to_contribute: Vec<(SerialId, TxIn, TransactionU16LenLimited)>, - outputs_to_contribute: Vec<(SerialId, TxOut)>, + outputs_to_contribute: Vec<(SerialId, OutputOwned)>, } pub(crate) enum InteractiveTxMessageSend { @@ -879,57 +1096,104 @@ pub(crate) enum HandleTxCompleteValue { impl InteractiveTxConstructor { /// Instantiates a new `InteractiveTxConstructor`. /// + /// `expected_remote_shared_funding_output`: In the case when the local node doesn't + /// add a shared output, but it expects a shared output to be added by the remote node, + /// it has to specify the script pubkey, used to determine the shared output, + /// and its (local) contribution from the shared output: + /// 0 when the whole value belongs to the remote node, or + /// positive if owned also by local. + /// Note: The local value cannot be larger that the actual shared output. + /// /// A tuple is returned containing the newly instantiate `InteractiveTxConstructor` and optionally /// an initial wrapped `Tx_` message which the holder needs to send to the counterparty. 
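As a rough illustration of the ownership accounting these new types encode (who is credited with how much of each output when fees and balances are checked), here is a minimal standalone sketch. `Role`, `Owned`, and the helper functions are illustrative stand-ins, not the crate's own (crate-private) types:

    // Simplified model of the local/remote value split introduced in this hunk.
    enum Role { Local, Remote }

    enum Owned {
        // Added by one side and fully owned by the adder (e.g. a change output).
        Single { value_sat: u64 },
        // Shared-control output (the funding output) with a declared local share.
        Shared { value_sat: u64, local_owned_sat: u64 },
    }

    fn local_value(out: &Owned, added_by: Role) -> u64 {
        match (out, added_by) {
            (Owned::Single { value_sat }, Role::Local) => *value_sat,
            (Owned::Single { .. }, Role::Remote) => 0,
            (Owned::Shared { local_owned_sat, .. }, _) => *local_owned_sat,
        }
    }

    fn remote_value(out: &Owned, added_by: Role) -> u64 {
        match (out, added_by) {
            (Owned::Single { .. }, Role::Local) => 0,
            (Owned::Single { value_sat }, Role::Remote) => *value_sat,
            (Owned::Shared { value_sat, local_owned_sat }, _) => {
                value_sat.saturating_sub(*local_owned_sat)
            },
        }
    }

    fn main() {
        // A shared funding output of 1_000_000 sats to which we contribute 200_000:
        let funding = Owned::Shared { value_sat: 1_000_000, local_owned_sat: 200_000 };
        assert_eq!(local_value(&funding, Role::Local), 200_000);
        assert_eq!(remote_value(&funding, Role::Local), 800_000);
        // A plain output added by the remote side belongs entirely to them:
        let change = Owned::Single { value_sat: 50_000 };
        assert_eq!(local_value(&change, Role::Remote), 0);
        assert_eq!(remote_value(&change, Role::Remote), 50_000);
    }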
pub fn new( entropy_source: &ES, channel_id: ChannelId, feerate_sat_per_kw: u32, is_initiator: bool, funding_tx_locktime: AbsoluteLockTime, inputs_to_contribute: Vec<(TxIn, TransactionU16LenLimited)>, - outputs_to_contribute: Vec, - ) -> (Self, Option) + outputs_to_contribute: Vec, + expected_remote_shared_funding_output: Option<(ScriptBuf, u64)>, + ) -> Result<(Self, Option), AbortReason> where ES::Target: EntropySource, { - let state_machine = - StateMachine::new(feerate_sat_per_kw, is_initiator, funding_tx_locktime); - let mut inputs_to_contribute: Vec<(SerialId, TxIn, TransactionU16LenLimited)> = - inputs_to_contribute + // Sanity check: There can be at most one shared output, local-added or remote-added + let mut expected_shared_funding_output: Option<(ScriptBuf, u64)> = None; + for output in &outputs_to_contribute { + let new_output = match output { + OutputOwned::Single(_tx_out) => None, + OutputOwned::SharedControlFullyOwned(tx_out) => { + Some((tx_out.script_pubkey.clone(), tx_out.value.to_sat())) + }, + OutputOwned::Shared(output) => { + // Sanity check + if output.local_owned > output.tx_out.value.to_sat() { + return Err(AbortReason::InvalidLowFundingOutputValue); + } + Some((output.tx_out.script_pubkey.clone(), output.local_owned)) + }, + }; + if new_output.is_some() { + if expected_shared_funding_output.is_some() + || expected_remote_shared_funding_output.is_some() + { + // more than one local-added shared output or + // one local-added and one remote-expected shared output + return Err(AbortReason::DuplicateFundingOutput); + } + expected_shared_funding_output = new_output; + } + } + if let Some(expected_remote_shared_funding_output) = expected_remote_shared_funding_output { + expected_shared_funding_output = Some(expected_remote_shared_funding_output); + } + if let Some(expected_shared_funding_output) = expected_shared_funding_output { + let state_machine = StateMachine::new( + feerate_sat_per_kw, + is_initiator, + funding_tx_locktime, + expected_shared_funding_output, + ); + let mut inputs_to_contribute: Vec<(SerialId, TxIn, TransactionU16LenLimited)> = + inputs_to_contribute + .into_iter() + .map(|(input, tx)| { + let serial_id = generate_holder_serial_id(entropy_source, is_initiator); + (serial_id, input, tx) + }) + .collect(); + // We'll sort by the randomly generated serial IDs, effectively shuffling the order of the inputs + // as the user passed them to us to avoid leaking any potential categorization of transactions + // before we pass any of the inputs to the counterparty. + inputs_to_contribute.sort_unstable_by_key(|(serial_id, _, _)| *serial_id); + let mut outputs_to_contribute: Vec<_> = outputs_to_contribute .into_iter() - .map(|(input, tx)| { + .map(|output| { let serial_id = generate_holder_serial_id(entropy_source, is_initiator); - (serial_id, input, tx) + (serial_id, output) }) .collect(); - // We'll sort by the randomly generated serial IDs, effectively shuffling the order of the inputs - // as the user passed them to us to avoid leaking any potential categorization of transactions - // before we pass any of the inputs to the counterparty. - inputs_to_contribute.sort_unstable_by_key(|(serial_id, _, _)| *serial_id); - let mut outputs_to_contribute: Vec<(SerialId, TxOut)> = outputs_to_contribute - .into_iter() - .map(|output| { - let serial_id = generate_holder_serial_id(entropy_source, is_initiator); - (serial_id, output) - }) - .collect(); - // In the same manner and for the same rationale as the inputs above, we'll shuffle the outputs. 
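The shuffling described in the comments above can be pictured with a small standalone sketch: draw a random serial ID per contribution, with the parity the interactive-tx protocol is assumed to assign to each role (even for the initiator, odd for the non-initiator), then sort by those IDs so the counterparty never sees the caller's original ordering. The names below are illustrative, not the crate's helpers:

    // Sketch of serial-ID generation and shuffling (assumed parity convention).
    fn random_serial_id(mut entropy: impl FnMut() -> u64, is_initiator: bool) -> u64 {
        let raw = entropy();
        if is_initiator { raw & !1u64 } else { raw | 1u64 }
    }

    fn shuffle_by_serial_id<T>(
        mut entropy: impl FnMut() -> u64, is_initiator: bool, items: Vec<T>,
    ) -> Vec<(u64, T)> {
        let mut with_ids: Vec<(u64, T)> = items
            .into_iter()
            .map(|item| (random_serial_id(&mut entropy, is_initiator), item))
            .collect();
        // Sorting by the random IDs shuffles the caller-supplied order while
        // keeping the role-specific parity intact.
        with_ids.sort_unstable_by_key(|(serial_id, _)| *serial_id);
        with_ids
    }

    fn main() {
        // Deterministic "entropy" just for the example.
        let mut counter = 0u64;
        let entropy = move || {
            counter = counter.wrapping_add(0x9E37_79B9_7F4A_7C15);
            counter
        };
        let shuffled =
            shuffle_by_serial_id(entropy, /* is_initiator */ true, vec!["in a", "in b", "in c"]);
        for (serial_id, item) in &shuffled {
            assert_eq!(serial_id % 2, 0); // initiator serial IDs keep even parity
            println!("{serial_id:>20} {item}");
        }
    }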
- outputs_to_contribute.sort_unstable_by_key(|(serial_id, _)| *serial_id); - let mut constructor = - Self { state_machine, channel_id, inputs_to_contribute, outputs_to_contribute }; - let message_send = if is_initiator { - match constructor.maybe_send_message() { - Ok(msg_send) => Some(msg_send), - Err(_) => { - debug_assert!( - false, - "We should always be able to start our state machine successfully" - ); - None - }, - } + // In the same manner and for the same rationale as the inputs above, we'll shuffle the outputs. + outputs_to_contribute.sort_unstable_by_key(|(serial_id, _)| *serial_id); + let mut constructor = + Self { state_machine, channel_id, inputs_to_contribute, outputs_to_contribute }; + let message_send = if is_initiator { + match constructor.maybe_send_message() { + Ok(msg_send) => Some(msg_send), + Err(_) => { + debug_assert!( + false, + "We should always be able to start our state machine successfully" + ); + None + }, + } + } else { + None + }; + Ok((constructor, message_send)) } else { - None - }; - (constructor, message_send) + Err(AbortReason::MissingFundingOutput) + } } fn maybe_send_message(&mut self) -> Result { @@ -950,8 +1214,8 @@ impl InteractiveTxConstructor { let msg = msgs::TxAddOutput { channel_id: self.channel_id, serial_id, - sats: output.value.to_sat(), - script: output.script_pubkey, + sats: output.tx_out().value.to_sat(), + script: output.tx_out().script_pubkey.clone(), }; do_state_transition!(self, sent_tx_add_output, &msg)?; Ok(InteractiveTxMessageSend::TxAddOutput(msg)) @@ -1037,6 +1301,7 @@ mod tests { use crate::sign::EntropySource; use crate::util::atomic_counter::AtomicCounter; use crate::util::ser::TransactionU16LenLimited; + use bitcoin::absolute::LockTime as AbsoluteLockTime; use bitcoin::amount::Amount; use bitcoin::blockdata::opcodes; use bitcoin::blockdata::script::Builder; @@ -1045,13 +1310,13 @@ mod tests { use bitcoin::secp256k1::{Keypair, Secp256k1}; use bitcoin::transaction::Version; use bitcoin::{ - absolute::LockTime as AbsoluteLockTime, OutPoint, Sequence, Transaction, TxIn, TxOut, + OutPoint, PubkeyHash, ScriptBuf, Sequence, Transaction, TxIn, TxOut, WPubkeyHash, }; - use bitcoin::{PubkeyHash, ScriptBuf, WPubkeyHash, WScriptHash}; use core::ops::Deref; use super::{ - get_output_weight, P2TR_INPUT_WEIGHT_LOWER_BOUND, P2WPKH_INPUT_WEIGHT_LOWER_BOUND, + get_output_weight, AddingRole, OutputOwned, SharedOwnedOutput, + P2TR_INPUT_WEIGHT_LOWER_BOUND, P2WPKH_INPUT_WEIGHT_LOWER_BOUND, P2WSH_INPUT_WEIGHT_LOWER_BOUND, TX_COMMON_FIELDS_WEIGHT, }; @@ -1100,10 +1365,14 @@ mod tests { struct TestSession { description: &'static str, inputs_a: Vec<(TxIn, TransactionU16LenLimited)>, - outputs_a: Vec, + outputs_a: Vec, inputs_b: Vec<(TxIn, TransactionU16LenLimited)>, - outputs_b: Vec, + outputs_b: Vec, expect_error: Option<(AbortReason, ErrorCulprit)>, + /// A node adds no shared output, but expects the peer to add one, with the specific script pubkey, and local contribution + a_expected_remote_shared_output: Option<(ScriptBuf, u64)>, + /// B node adds no shared output, but expects the peer to add one, with the specific script pubkey, and local contribution + b_expected_remote_shared_output: Option<(ScriptBuf, u64)>, } fn do_test_interactive_tx_constructor(session: TestSession) { @@ -1127,24 +1396,103 @@ mod tests { let channel_id = ChannelId(entropy_source.get_secure_random_bytes()); let tx_locktime = AbsoluteLockTime::from_height(1337).unwrap(); - let (mut constructor_a, first_message_a) = InteractiveTxConstructor::new( + // funding output sanity 
check + let shared_outputs_by_a: Vec<_> = + session.outputs_a.iter().filter(|o| o.is_shared()).collect(); + if shared_outputs_by_a.len() > 1 { + println!("Test warning: Expected at most one shared output. NodeA"); + } + let shared_output_by_a = if shared_outputs_by_a.len() >= 1 { + Some(shared_outputs_by_a[0].value()) + } else { + None + }; + let shared_outputs_by_b: Vec<_> = + session.outputs_b.iter().filter(|o| o.is_shared()).collect(); + if shared_outputs_by_b.len() > 1 { + println!("Test warning: Expected at most one shared output. NodeB"); + } + let shared_output_by_b = if shared_outputs_by_b.len() >= 1 { + Some(shared_outputs_by_b[0].value()) + } else { + None + }; + if session.a_expected_remote_shared_output.is_some() + || session.b_expected_remote_shared_output.is_some() + { + let expected_by_a = if let Some(a_expected_remote_shared_output) = + &session.a_expected_remote_shared_output + { + a_expected_remote_shared_output.1 + } else { + if shared_outputs_by_a.len() >= 1 { + shared_outputs_by_a[0].local_value(AddingRole::Local) + } else { + 0 + } + }; + let expected_by_b = if let Some(b_expected_remote_shared_output) = + &session.b_expected_remote_shared_output + { + b_expected_remote_shared_output.1 + } else { + if shared_outputs_by_b.len() >= 1 { + shared_outputs_by_b[0].local_value(AddingRole::Local) + } else { + 0 + } + }; + + let expected_sum = expected_by_a + expected_by_b; + let actual_shared_output = + shared_output_by_a.unwrap_or(shared_output_by_b.unwrap_or(0)); + if expected_sum != actual_shared_output { + println!("Test warning: Sum of expected shared output values does not match actual shared output value, {} {} {} {} {} {}", expected_sum, actual_shared_output, expected_by_a, expected_by_b, shared_output_by_a.unwrap_or(0), shared_output_by_b.unwrap_or(0)); + } + } + + let (mut constructor_a, first_message_a) = match InteractiveTxConstructor::new( entropy_source, channel_id, TEST_FEERATE_SATS_PER_KW, true, tx_locktime, session.inputs_a, - session.outputs_a, - ); - let (mut constructor_b, first_message_b) = InteractiveTxConstructor::new( + session.outputs_a.iter().map(|o| o.clone()).collect(), + session.a_expected_remote_shared_output, + ) { + Ok(r) => r, + Err(abort_reason) => { + assert_eq!( + Some((abort_reason, ErrorCulprit::NodeA)), + session.expect_error, + "Test: {}", + session.description + ); + return; + }, + }; + let (mut constructor_b, first_message_b) = match InteractiveTxConstructor::new( entropy_source, channel_id, TEST_FEERATE_SATS_PER_KW, false, tx_locktime, session.inputs_b, - session.outputs_b, - ); + session.outputs_b.iter().map(|o| o.clone()).collect(), + session.b_expected_remote_shared_output, + ) { + Ok(r) => r, + Err(abort_reason) => { + assert_eq!( + Some((abort_reason, ErrorCulprit::NodeB)), + session.expect_error, + "Test: {}", + session.description + ); + return; + }, + }; let handle_message_send = |msg: InteractiveTxMessageSend, for_constructor: &mut InteractiveTxConstructor| { @@ -1194,7 +1542,7 @@ mod tests { "Test: {}", session.description ); - assert!(message_send_b.is_none()); + assert!(message_send_b.is_none(), "Test: {}", session.description); return; }, } @@ -1218,7 +1566,7 @@ mod tests { "Test: {}", session.description ); - assert!(message_send_a.is_none()); + assert!(message_send_a.is_none(), "Test: {}", session.description); return; }, } @@ -1227,12 +1575,18 @@ mod tests { assert!(message_send_a.is_none()); assert!(message_send_b.is_none()); assert_eq!(final_tx_a.unwrap().into_unsigned_tx(), final_tx_b.unwrap().into_unsigned_tx()); 
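The warning printed by the harness above simply cross-checks the declared shares of the single shared output: the adder's `local_owned` plus the peer's expected contribution should sum to the output's full value. For example, with the figures used by the "Multi-input-output contributions from both sides" session further below (illustration only):

    fn main() {
        let shared_output_value: u64 = 1_000_000; // the OutputOwned::Shared added by node A
        let a_local_owned: u64 = 200_000;         // A's declared local share of that output
        let b_expected_share: u64 = 800_000;      // B's b_expected_remote_shared_output share
        assert_eq!(a_local_owned + b_expected_share, shared_output_value);
    }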
- assert!(session.expect_error.is_none(), "Test: {}", session.description); + assert!( + session.expect_error.is_none(), + "Missing expected error {:?}, Test: {}", + session.expect_error, + session.description, + ); } #[derive(Debug, Clone, Copy)] enum TestOutput { P2WPKH(u64), + /// P2WSH, but with the specific script used for the funding output P2WSH(u64), P2TR(u64), // Non-witness type to test rejection. @@ -1246,12 +1600,8 @@ mod tests { fn generate_txout(output: &TestOutput) -> TxOut { let secp_ctx = Secp256k1::new(); let (value, script_pubkey) = match output { - TestOutput::P2WPKH(value) => { - (*value, ScriptBuf::new_p2wpkh(&WPubkeyHash::from_slice(&[1; 20]).unwrap())) - }, - TestOutput::P2WSH(value) => { - (*value, ScriptBuf::new_p2wsh(&WScriptHash::from_slice(&[2; 32]).unwrap())) - }, + TestOutput::P2WPKH(value) => (*value, generate_p2wpkh_script_pubkey()), + TestOutput::P2WSH(value) => (*value, generate_funding_script_pubkey()), TestOutput::P2TR(value) => ( *value, ScriptBuf::new_p2tr( @@ -1306,8 +1656,39 @@ mod tests { ScriptBuf::new_p2wpkh(&WPubkeyHash::from_slice(&[1; 20]).unwrap()) } - fn generate_outputs(outputs: &[TestOutput]) -> Vec { - outputs.iter().map(generate_txout).collect() + fn generate_funding_script_pubkey() -> ScriptBuf { + Builder::new().push_int(33).into_script().to_p2wsh() + } + + fn generate_output_nonfunding_one(output: &TestOutput) -> OutputOwned { + OutputOwned::Single(generate_txout(output)) + } + + fn generate_outputs(outputs: &[TestOutput]) -> Vec { + outputs.iter().map(|o| generate_output_nonfunding_one(o)).collect() + } + + /// Generate a single output that is the funding output + fn generate_output(output: &TestOutput) -> Vec { + vec![OutputOwned::SharedControlFullyOwned(generate_txout(output))] + } + + /// Generate a single P2WSH output that is the funding output + fn generate_funding_output(value: u64) -> Vec { + generate_output(&TestOutput::P2WSH(value)) + } + + /// Generate a single P2WSH output with shared contribution that is the funding output + fn generate_shared_funding_output_one(value: u64, local_value: u64) -> OutputOwned { + OutputOwned::Shared(SharedOwnedOutput { + tx_out: generate_txout(&TestOutput::P2WSH(value)), + local_owned: local_value, + }) + } + + /// Generate a single P2WSH output with shared contribution that is the funding output + fn generate_shared_funding_output(value: u64, local_value: u64) -> Vec { + vec![generate_shared_funding_output_one(value, local_value)] } fn generate_fixed_number_of_inputs(count: u16) -> Vec<(TxIn, TransactionU16LenLimited)> { @@ -1349,7 +1730,7 @@ mod tests { inputs } - fn generate_fixed_number_of_outputs(count: u16) -> Vec { + fn generate_fixed_number_of_outputs(count: u16) -> Vec { // Set a constant value for each TxOut generate_outputs(&vec![TestOutput::P2WPKH(1_000_000); count as usize]) } @@ -1358,8 +1739,11 @@ mod tests { Builder::new().push_opcode(opcodes::OP_TRUE).into_script().to_p2sh() } - fn generate_non_witness_output(value: u64) -> TxOut { - TxOut { value: Amount::from_sat(value), script_pubkey: generate_p2sh_script_pubkey() } + fn generate_non_witness_output(value: u64) -> OutputOwned { + OutputOwned::Single(TxOut { + value: Amount::from_sat(value), + script_pubkey: generate_p2sh_script_pubkey(), + }) } #[test] @@ -1370,15 +1754,19 @@ mod tests { outputs_a: vec![], inputs_b: vec![], outputs_b: vec![], - expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)), + expect_error: Some((AbortReason::MissingFundingOutput, ErrorCulprit::NodeA)), + 
a_expected_remote_shared_output: None, + b_expected_remote_shared_output: None, }); do_test_interactive_tx_constructor(TestSession { description: "Single contribution, no initiator inputs", inputs_a: vec![], - outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::OutputsValueExceedsInputsValue, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Single contribution, no initiator outputs", @@ -1386,15 +1774,19 @@ mod tests { outputs_a: vec![], inputs_b: vec![], outputs_b: vec![], - expect_error: None, + expect_error: Some((AbortReason::MissingFundingOutput, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: None, }); do_test_interactive_tx_constructor(TestSession { description: "Single contribution, no fees", inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); let p2wpkh_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, P2WPKH_INPUT_WEIGHT_LOWER_BOUND); let outputs_fee = fee_for_weight( @@ -1403,92 +1795,106 @@ mod tests { ); let tx_common_fields_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, TX_COMMON_FIELDS_WEIGHT); + + let amount_adjusted_with_p2wpkh_fee = + 1_000_000 - p2wpkh_fee - outputs_fee - tx_common_fields_fee; do_test_interactive_tx_constructor(TestSession { description: "Single contribution, with P2WPKH input, insufficient fees", inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH( - 1_000_000 - p2wpkh_fee - outputs_fee - tx_common_fields_fee + 1, /* makes fees insuffcient for initiator */ - )]), + outputs_a: generate_output(&TestOutput::P2WPKH( + amount_adjusted_with_p2wpkh_fee + 1, /* makes fees insuffcient for initiator */ + )), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Single contribution with P2WPKH input, sufficient fees", inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH( - 1_000_000 - p2wpkh_fee - outputs_fee - tx_common_fields_fee, - )]), + outputs_a: generate_output(&TestOutput::P2WPKH(amount_adjusted_with_p2wpkh_fee)), inputs_b: vec![], outputs_b: vec![], expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); let p2wsh_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, P2WSH_INPUT_WEIGHT_LOWER_BOUND); + let amount_adjusted_with_p2wsh_fee = + 1_000_000 - p2wsh_fee - outputs_fee - tx_common_fields_fee; do_test_interactive_tx_constructor(TestSession { description: "Single contribution, with P2WSH input, insufficient fees", inputs_a: generate_inputs(&[TestOutput::P2WSH(1_000_000)]), - outputs_a: 
generate_outputs(&[TestOutput::P2WPKH( - 1_000_000 - p2wsh_fee - outputs_fee - tx_common_fields_fee + 1, /* makes fees insuffcient for initiator */ - )]), + outputs_a: generate_output(&TestOutput::P2WPKH( + amount_adjusted_with_p2wsh_fee + 1, /* makes fees insuffcient for initiator */ + )), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Single contribution with P2WSH input, sufficient fees", inputs_a: generate_inputs(&[TestOutput::P2WSH(1_000_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH( - 1_000_000 - p2wsh_fee - outputs_fee - tx_common_fields_fee, - )]), + outputs_a: generate_output(&TestOutput::P2WPKH(amount_adjusted_with_p2wsh_fee)), inputs_b: vec![], outputs_b: vec![], expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); let p2tr_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, P2TR_INPUT_WEIGHT_LOWER_BOUND); + let amount_adjusted_with_p2tr_fee = + 1_000_000 - p2tr_fee - outputs_fee - tx_common_fields_fee; do_test_interactive_tx_constructor(TestSession { description: "Single contribution, with P2TR input, insufficient fees", inputs_a: generate_inputs(&[TestOutput::P2TR(1_000_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH( - 1_000_000 - p2tr_fee - outputs_fee - tx_common_fields_fee + 1, /* makes fees insuffcient for initiator */ - )]), + outputs_a: generate_output(&TestOutput::P2WPKH( + amount_adjusted_with_p2tr_fee + 1, /* makes fees insuffcient for initiator */ + )), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Single contribution with P2TR input, sufficient fees", inputs_a: generate_inputs(&[TestOutput::P2TR(1_000_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH( - 1_000_000 - p2tr_fee - outputs_fee - tx_common_fields_fee, - )]), + outputs_a: generate_output(&TestOutput::P2WPKH(amount_adjusted_with_p2tr_fee)), inputs_b: vec![], outputs_b: vec![], expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Initiator contributes sufficient fees, but non-initiator does not", inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]), outputs_a: vec![], inputs_b: generate_inputs(&[TestOutput::P2WPKH(100_000)]), - outputs_b: generate_outputs(&[TestOutput::P2WPKH(100_000)]), + outputs_b: generate_output(&TestOutput::P2WPKH(100_000)), expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeB)), + a_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), + b_expected_remote_shared_output: None, }); do_test_interactive_tx_constructor(TestSession { description: "Multi-input-output contributions from both sides", inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000); 2]), - outputs_a: generate_outputs(&[ - TestOutput::P2WPKH(1_000_000), - TestOutput::P2WPKH(200_000), - ]), + outputs_a: vec![ + generate_shared_funding_output_one(1_000_000, 200_000), + 
generate_output_nonfunding_one(&TestOutput::P2WPKH(200_000)), + ], inputs_b: generate_inputs(&[ TestOutput::P2WPKH(1_000_000), TestOutput::P2WPKH(500_000), ]), - outputs_b: generate_outputs(&[ - TestOutput::P2WPKH(1_000_000), - TestOutput::P2WPKH(400_000), - ]), + outputs_b: vec![generate_output_nonfunding_one(&TestOutput::P2WPKH(400_000))], expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 800_000)), }); do_test_interactive_tx_constructor(TestSession { @@ -1498,6 +1904,8 @@ mod tests { inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); let tx = @@ -1509,10 +1917,12 @@ mod tests { do_test_interactive_tx_constructor(TestSession { description: "Invalid input sequence from initiator", inputs_a: vec![(invalid_sequence_input, tx.clone())], - outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::IncorrectInputSequenceValue, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); let duplicate_input = TxIn { previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 }, @@ -1522,11 +1932,14 @@ mod tests { do_test_interactive_tx_constructor(TestSession { description: "Duplicate prevout from initiator", inputs_a: vec![(duplicate_input.clone(), tx.clone()), (duplicate_input, tx.clone())], - outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeB)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); + // Non-initiator uses same prevout as initiator. 
let duplicate_input = TxIn { previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 }, sequence: Sequence::ENABLE_RBF_NO_LOCKTIME, @@ -1535,10 +1948,27 @@ mod tests { do_test_interactive_tx_constructor(TestSession { description: "Non-initiator uses same prevout as initiator", inputs_a: vec![(duplicate_input.clone(), tx.clone())], - outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_shared_funding_output(1_000_000, 905_000), inputs_b: vec![(duplicate_input.clone(), tx.clone())], outputs_b: vec![], expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 95_000)), + }); + let duplicate_input = TxIn { + previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 }, + sequence: Sequence::ENABLE_RBF_NO_LOCKTIME, + ..Default::default() + }; + do_test_interactive_tx_constructor(TestSession { + description: "Non-initiator uses same prevout as initiator", + inputs_a: vec![(duplicate_input.clone(), tx.clone())], + outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)), + inputs_b: vec![(duplicate_input.clone(), tx.clone())], + outputs_b: vec![], + expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Initiator sends too many TxAddInputs", @@ -1547,6 +1977,8 @@ mod tests { inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::ReceivedTooManyTxAddInputs, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor_with_entropy_source( TestSession { @@ -1557,6 +1989,8 @@ mod tests { inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::DuplicateSerialId, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }, &DuplicateEntropySource, ); @@ -1567,24 +2001,30 @@ mod tests { inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::ReceivedTooManyTxAddOutputs, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Initiator sends an output below dust value", inputs_a: vec![], - outputs_a: generate_outputs(&[TestOutput::P2WSH( + outputs_a: generate_funding_output( generate_p2wsh_script_pubkey().dust_value().to_sat() - 1, - )]), + ), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::BelowDustLimit, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Initiator sends an output above maximum sats allowed", inputs_a: vec![], - outputs_a: generate_outputs(&[TestOutput::P2WPKH(TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1)]), + outputs_a: generate_output(&TestOutput::P2WPKH(TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1)), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::ExceededMaximumSatsAllowed, ErrorCulprit::NodeA)), + 
a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Initiator sends an output without a witness program", @@ -1593,6 +2033,8 @@ mod tests { inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::InvalidOutputScript, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor_with_entropy_source( TestSession { @@ -1603,6 +2045,8 @@ mod tests { inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::DuplicateSerialId, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }, &DuplicateEntropySource, ); @@ -1610,10 +2054,12 @@ mod tests { do_test_interactive_tx_constructor(TestSession { description: "Peer contributed more output value than inputs", inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000)]), - outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)), inputs_b: vec![], outputs_b: vec![], expect_error: Some((AbortReason::OutputsValueExceedsInputsValue, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { @@ -1626,6 +2072,8 @@ mod tests { AbortReason::ExceededNumberOfInputsOrOutputs, ErrorCulprit::Indeterminate, )), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), }); do_test_interactive_tx_constructor(TestSession { description: "Peer contributed more than allowed number of outputs", @@ -1637,6 +2085,121 @@ mod tests { AbortReason::ExceededNumberOfInputsOrOutputs, ErrorCulprit::Indeterminate, )), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + }); + + // Adding multiple outputs to the funding output pubkey is an error + do_test_interactive_tx_constructor(TestSession { + description: "Adding two outputs to the funding output pubkey", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]), + outputs_a: generate_funding_output(100_000), + inputs_b: generate_inputs(&[TestOutput::P2WPKH(1_001_000)]), + outputs_b: generate_funding_output(100_000), + expect_error: Some((AbortReason::DuplicateFundingOutput, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: None, + }); + + // We add the funding output, but we contribute a little + do_test_interactive_tx_constructor(TestSession { + description: "Funding output by us, small contribution", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(12_000)]), + outputs_a: generate_shared_funding_output(1_000_000, 10_000), + inputs_b: generate_inputs(&[TestOutput::P2WPKH(992_000)]), + outputs_b: vec![], + expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 990_000)), + }); + + // They add the funding output, and we contribute a little + do_test_interactive_tx_constructor(TestSession { + description: "Funding output by them, small contribution", 
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(12_000)]), + outputs_a: vec![], + inputs_b: generate_inputs(&[TestOutput::P2WPKH(992_000)]), + outputs_b: generate_shared_funding_output(1_000_000, 990_000), + expect_error: None, + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 10_000)), + b_expected_remote_shared_output: None, + }); + + // We add the funding output, and we contribute most + do_test_interactive_tx_constructor(TestSession { + description: "Funding output by us, large contribution", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(992_000)]), + outputs_a: generate_shared_funding_output(1_000_000, 990_000), + inputs_b: generate_inputs(&[TestOutput::P2WPKH(12_000)]), + outputs_b: vec![], + expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 10_000)), + }); + + // They add the funding output, but we contribute most + do_test_interactive_tx_constructor(TestSession { + description: "Funding output by them, large contribution", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(992_000)]), + outputs_a: vec![], + inputs_b: generate_inputs(&[TestOutput::P2WPKH(12_000)]), + outputs_b: generate_shared_funding_output(1_000_000, 10_000), + expect_error: None, + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 990_000)), + b_expected_remote_shared_output: None, + }); + + // During a splice-out, with peer providing more output value than input value + // but still pays enough fees due to their to_remote_value_satoshis portion in + // the shared input. + do_test_interactive_tx_constructor(TestSession { + description: "Splice out with sufficient initiator balance", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000), TestOutput::P2WPKH(50_000)]), + outputs_a: generate_funding_output(120_000), + inputs_b: generate_inputs(&[TestOutput::P2WPKH(50_000)]), + outputs_b: vec![], + expect_error: None, + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + }); + + // During a splice-out, with peer providing more output value than input value + // and the to_remote_value_satoshis portion in + // the shared input cannot cover fees + do_test_interactive_tx_constructor(TestSession { + description: "Splice out with insufficient initiator balance", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000), TestOutput::P2WPKH(15_000)]), + outputs_a: generate_funding_output(120_000), + inputs_b: generate_inputs(&[TestOutput::P2WPKH(85_000)]), + outputs_b: vec![], + expect_error: Some((AbortReason::OutputsValueExceedsInputsValue, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)), + }); + + // The actual funding output value is lower than the intended local contribution by the same node + do_test_interactive_tx_constructor(TestSession { + description: "Splice in, invalid intended local contribution", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000), TestOutput::P2WPKH(15_000)]), + outputs_a: generate_shared_funding_output(100_000, 120_000), // local value is higher than the output value + inputs_b: generate_inputs(&[TestOutput::P2WPKH(85_000)]), + outputs_b: vec![], + expect_error: Some((AbortReason::InvalidLowFundingOutputValue, ErrorCulprit::NodeA)), + a_expected_remote_shared_output: None, + b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 20_000)), + }); + + // The actual funding 
output value is lower than the intended local contribution of the other node + do_test_interactive_tx_constructor(TestSession { + description: "Splice in, invalid intended local contribution", + inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000), TestOutput::P2WPKH(15_000)]), + outputs_a: vec![], + inputs_b: generate_inputs(&[TestOutput::P2WPKH(85_000)]), + outputs_b: generate_funding_output(100_000), + // The error is caused by NodeA, it occurs when nodeA prepares the message to be sent to NodeB, that's why here it shows up as NodeB + expect_error: Some((AbortReason::InvalidLowFundingOutputValue, ErrorCulprit::NodeB)), + a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 120_000)), // this is higher than the actual output value + b_expected_remote_shared_output: None, }); } diff --git a/lightning/src/ln/offers_tests.rs b/lightning/src/ln/offers_tests.rs index eca43afe..405ab87b 100644 --- a/lightning/src/ln/offers_tests.rs +++ b/lightning/src/ln/offers_tests.rs @@ -192,8 +192,12 @@ fn extract_invoice_request<'a, 'b, 'c>( ParsedOnionMessageContents::Offers(offers_message) => match offers_message { OffersMessage::InvoiceRequest(invoice_request) => (invoice_request, reply_path.unwrap()), OffersMessage::Invoice(invoice) => panic!("Unexpected invoice: {:?}", invoice), + #[cfg(async_payments)] + OffersMessage::StaticInvoice(invoice) => panic!("Unexpected static invoice: {:?}", invoice), OffersMessage::InvoiceError(error) => panic!("Unexpected invoice_error: {:?}", error), }, + #[cfg(async_payments)] + ParsedOnionMessageContents::AsyncPayments(message) => panic!("Unexpected async payments message: {:?}", message), ParsedOnionMessageContents::Custom(message) => panic!("Unexpected custom message: {:?}", message), }, Ok(PeeledOnion::Forward(_, _)) => panic!("Unexpected onion message forward"), @@ -207,8 +211,12 @@ fn extract_invoice<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, message: &OnionMessage) ParsedOnionMessageContents::Offers(offers_message) => match offers_message { OffersMessage::InvoiceRequest(invoice_request) => panic!("Unexpected invoice_request: {:?}", invoice_request), OffersMessage::Invoice(invoice) => invoice, + #[cfg(async_payments)] + OffersMessage::StaticInvoice(invoice) => panic!("Unexpected static invoice: {:?}", invoice), OffersMessage::InvoiceError(error) => panic!("Unexpected invoice_error: {:?}", error), }, + #[cfg(async_payments)] + ParsedOnionMessageContents::AsyncPayments(message) => panic!("Unexpected async payments message: {:?}", message), ParsedOnionMessageContents::Custom(message) => panic!("Unexpected custom message: {:?}", message), }, Ok(PeeledOnion::Forward(_, _)) => panic!("Unexpected onion message forward"), @@ -224,8 +232,12 @@ fn extract_invoice_error<'a, 'b, 'c>( ParsedOnionMessageContents::Offers(offers_message) => match offers_message { OffersMessage::InvoiceRequest(invoice_request) => panic!("Unexpected invoice_request: {:?}", invoice_request), OffersMessage::Invoice(invoice) => panic!("Unexpected invoice: {:?}", invoice), + #[cfg(async_payments)] + OffersMessage::StaticInvoice(invoice) => panic!("Unexpected invoice: {:?}", invoice), OffersMessage::InvoiceError(error) => error, }, + #[cfg(async_payments)] + ParsedOnionMessageContents::AsyncPayments(message) => panic!("Unexpected async payments message: {:?}", message), ParsedOnionMessageContents::Custom(message) => panic!("Unexpected custom message: {:?}", message), }, Ok(PeeledOnion::Forward(_, _)) => panic!("Unexpected onion message forward"), @@ -950,9 +962,12 @@ fn 
pays_bolt12_invoice_asynchronously() { ); } -/// Fails creating an offer when a blinded path cannot be created without exposing the node's id. +/// Checks that an offer can be created using an unannounced node as a blinded path's introduction +/// node. This is only preferred if there are no other options which may indicated either the offer +/// is intended for the unannounced node or that the node is actually announced (e.g., an LSP) but +/// the recipient doesn't have a network graph. #[test] -fn fails_creating_offer_without_blinded_paths() { +fn creates_offer_with_blinded_path_using_unannounced_introduction_node() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -960,15 +975,63 @@ fn fails_creating_offer_without_blinded_paths() { create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000); - match nodes[0].node.create_offer_builder(None) { - Ok(_) => panic!("Expected error"), - Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths), + let alice = &nodes[0]; + let alice_id = alice.node.get_our_node_id(); + let bob = &nodes[1]; + let bob_id = bob.node.get_our_node_id(); + + let offer = alice.node + .create_offer_builder(None).unwrap() + .amount_msats(10_000_000) + .build().unwrap(); + assert_ne!(offer.signing_pubkey(), Some(alice_id)); + assert!(!offer.paths().is_empty()); + for path in offer.paths() { + assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id)); } + + let payment_id = PaymentId([1; 32]); + bob.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None).unwrap(); + expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id); + + let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap(); + alice.onion_messenger.handle_onion_message(&bob_id, &onion_message); + + let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message); + let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext { + offer_id: offer.id(), + invoice_request: InvoiceRequestFields { + payer_id: invoice_request.payer_id(), + quantity: None, + payer_note_truncated: None, + }, + }); + assert_ne!(invoice_request.payer_id(), bob_id); + assert_eq!(reply_path.introduction_node, IntroductionNode::NodeId(alice_id)); + + let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); + bob.onion_messenger.handle_onion_message(&alice_id, &onion_message); + + let invoice = extract_invoice(bob, &onion_message); + assert_ne!(invoice.signing_pubkey(), alice_id); + assert!(!invoice.payment_paths().is_empty()); + for (_, path) in invoice.payment_paths() { + assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id)); + } + + route_bolt12_payment(bob, &[alice], &invoice); + expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id); + + claim_bolt12_payment(bob, &[alice], payment_context); + expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id); } -/// Fails creating a refund when a blinded path cannot be created without exposing the node's id. +/// Checks that a refund can be created using an unannounced node as a blinded path's introduction +/// node. This is only preferred if there are no other options which may indicated either the refund +/// is intended for the unannounced node or that the node is actually announced (e.g., an LSP) but +/// the sender doesn't have a network graph. 
#[test] -fn fails_creating_refund_without_blinded_paths() { +fn creates_refund_with_blinded_path_using_unannounced_introduction_node() { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); @@ -976,17 +1039,35 @@ fn fails_creating_refund_without_blinded_paths() { create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000); + let alice = &nodes[0]; + let alice_id = alice.node.get_our_node_id(); + let bob = &nodes[1]; + let bob_id = bob.node.get_our_node_id(); + let absolute_expiry = Duration::from_secs(u64::MAX); let payment_id = PaymentId([1; 32]); - - match nodes[0].node.create_refund_builder( - 10_000, absolute_expiry, payment_id, Retry::Attempts(0), None - ) { - Ok(_) => panic!("Expected error"), - Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths), + let refund = bob.node + .create_refund_builder(10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None) + .unwrap() + .build().unwrap(); + assert_ne!(refund.payer_id(), bob_id); + assert!(!refund.paths().is_empty()); + for path in refund.paths() { + assert_eq!(path.introduction_node, IntroductionNode::NodeId(alice_id)); } + expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id); - assert!(nodes[0].node.list_recent_payments().is_empty()); + let expected_invoice = alice.node.request_refund_payment(&refund).unwrap(); + + let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap(); + + let invoice = extract_invoice(bob, &onion_message); + assert_eq!(invoice, expected_invoice); + assert_ne!(invoice.signing_pubkey(), alice_id); + assert!(!invoice.payment_paths().is_empty()); + for (_, path) in invoice.payment_paths() { + assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id)); + } } /// Fails creating or paying an offer when a blinded path cannot be created because no peers are @@ -1165,8 +1246,7 @@ fn fails_sending_invoice_with_unsupported_chain_for_refund() { } } -/// Fails creating an invoice request when a blinded reply path cannot be created without exposing -/// the node's id. +/// Fails creating an invoice request when a blinded reply path cannot be created. 
#[test] fn fails_creating_invoice_request_without_blinded_reply_path() { let chanmon_cfgs = create_chanmon_cfgs(6); @@ -1183,7 +1263,7 @@ fn fails_creating_invoice_request_without_blinded_reply_path() { let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]); disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]); - disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]); + disconnect_peers(david, &[bob, charlie, &nodes[4], &nodes[5]]); let offer = alice.node .create_offer_builder(None).unwrap() diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 3ae128cb..1915a4c5 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -13,7 +13,8 @@ use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::secp256k1::{self, Secp256k1, SecretKey}; -use crate::sign::{EntropySource, NodeSigner, Recipient}; +use crate::blinded_path::{IntroductionNode, NodeIdLookUp}; +use crate::blinded_path::payment::advance_path_by_one; use crate::events::{self, PaymentFailureReason}; use crate::ln::types::{PaymentHash, PaymentPreimage, PaymentSecret}; use crate::ln::channel_state::ChannelDetails; @@ -22,6 +23,7 @@ use crate::ln::onion_utils; use crate::ln::onion_utils::{DecodedOnionFailure, HTLCFailReason}; use crate::offers::invoice::Bolt12Invoice; use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, PaymentParameters, Route, RouteParameters, Router}; +use crate::sign::{EntropySource, NodeSigner, Recipient}; use crate::util::errors::APIError; use crate::util::logger::Logger; use crate::util::time::Time; @@ -71,7 +73,7 @@ pub(crate) enum PendingOutboundPayment { keysend_preimage: Option, custom_tlvs: Vec<(u64, Vec)>, pending_amt_msat: u64, - /// Used to track the fee paid. Only present if the payment was serialized on 0.0.103+. + /// Used to track the fee paid. Present iff the payment was serialized on 0.0.103+. pending_fee_msat: Option, /// The total payment amount across all paths, used to verify that a retry is not overpaying. total_msat: u64, @@ -775,10 +777,13 @@ impl OutboundPayments { } } - pub(super) fn send_payment_for_bolt12_invoice( + pub(super) fn send_payment_for_bolt12_invoice< + R: Deref, ES: Deref, NS: Deref, NL: Deref, IH, SP, L: Deref + >( &self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R, first_hops: Vec, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS, - best_block_height: u32, logger: &L, + node_id_lookup: &NL, secp_ctx: &Secp256k1, best_block_height: u32, + logger: &L, pending_events: &Mutex)>>, send_payment_along_path: SP, ) -> Result<(), Bolt12PaymentError> @@ -786,6 +791,7 @@ impl OutboundPayments { R::Target: Router, ES::Target: EntropySource, NS::Target: NodeSigner, + NL::Target: NodeIdLookUp, L::Target: Logger, IH: Fn() -> InFlightHtlcs, SP: Fn(SendAlongPathArgs) -> Result<(), APIError>, @@ -807,9 +813,30 @@ impl OutboundPayments { hash_map::Entry::Vacant(_) => return Err(Bolt12PaymentError::UnexpectedInvoice), }; - let pay_params = PaymentParameters::from_bolt12_invoice(&invoice); + let mut payment_params = PaymentParameters::from_bolt12_invoice(&invoice); + + // Advance any blinded path where the introduction node is our node. 
+ if let Ok(our_node_id) = node_signer.get_node_id(Recipient::Node) { + for (_, path) in payment_params.payee.blinded_route_hints_mut().iter_mut() { + let introduction_node_id = match path.introduction_node { + IntroductionNode::NodeId(pubkey) => pubkey, + IntroductionNode::DirectedShortChannelId(direction, scid) => { + match node_id_lookup.next_node_id(scid) { + Some(next_node_id) => *direction.select_pubkey(&our_node_id, &next_node_id), + None => continue, + } + }, + }; + if introduction_node_id == our_node_id { + let _ = advance_path_by_one(path, node_signer, node_id_lookup, secp_ctx); + } + } + } + let amount_msat = invoice.amount_msats(); - let mut route_params = RouteParameters::from_payment_params_and_value(pay_params, amount_msat); + let mut route_params = RouteParameters::from_payment_params_and_value( + payment_params, amount_msat + ); if let Some(max_fee_msat) = max_total_routing_fee_msat { route_params.max_total_routing_fee_msat = Some(max_fee_msat); } @@ -1856,6 +1883,7 @@ mod tests { use core::time::Duration; + use crate::blinded_path::EmptyNodeIdLookUp; use crate::events::{Event, PathFailure, PaymentFailureReason}; use crate::ln::types::PaymentHash; use crate::ln::channelmanager::{PaymentId, RecipientOnionFields}; @@ -2199,6 +2227,7 @@ mod tests { let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); + let secp_ctx = Secp256k1::new(); let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet); let pending_events = Mutex::new(VecDeque::new()); @@ -2227,7 +2256,8 @@ mod tests { assert_eq!( outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, - &&keys_manager, 0, &&logger, &pending_events, |_| panic!() + &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events, + |_| panic!() ), Ok(()), ); @@ -2250,6 +2280,7 @@ mod tests { let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); + let secp_ctx = Secp256k1::new(); let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet); let pending_events = Mutex::new(VecDeque::new()); @@ -2286,7 +2317,8 @@ mod tests { assert_eq!( outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, - &&keys_manager, 0, &&logger, &pending_events, |_| panic!() + &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events, + |_| panic!() ), Ok(()), ); @@ -2309,6 +2341,7 @@ mod tests { let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger)); let scorer = RwLock::new(test_utils::TestScorer::new()); let router = test_utils::TestRouter::new(network_graph, &logger, &scorer); + let secp_ctx = Secp256k1::new(); let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet); let pending_events = Mutex::new(VecDeque::new()); @@ -2358,7 +2391,8 @@ mod tests { assert_eq!( outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, - &&keys_manager, 0, &&logger, &pending_events, |_| panic!() + &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events, + |_| panic!() ), 
Err(Bolt12PaymentError::UnexpectedInvoice), ); @@ -2375,7 +2409,8 @@ mod tests { assert_eq!( outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, - &&keys_manager, 0, &&logger, &pending_events, |_| Ok(()) + &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events, + |_| Ok(()) ), Ok(()), ); @@ -2385,7 +2420,8 @@ mod tests { assert_eq!( outbound_payments.send_payment_for_bolt12_invoice( &invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager, - &&keys_manager, 0, &&logger, &pending_events, |_| panic!() + &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events, + |_| panic!() ), Err(Bolt12PaymentError::DuplicateInvoice), ); diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index dcfb6ed8..fcfdd5cb 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -2053,22 +2053,28 @@ fn accept_underpaying_htlcs_config() { fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { let chanmon_cfgs = create_chanmon_cfgs(3); let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let max_in_flight_percent = 10; let mut intercept_forwards_config = test_default_channel_config(); intercept_forwards_config.accept_intercept_htlcs = true; + intercept_forwards_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent; let mut underpay_config = test_default_channel_config(); underpay_config.channel_config.accept_underpaying_htlcs = true; + underpay_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(underpay_config)]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let amt_msat = 900_000; + let mut chan_ids = Vec::new(); for _ in 0..num_mpp_parts { - let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000, 0); - let channel_id = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 2_000_000, 0).0.channel_id; + // We choose the channel size so that there can be at most one part pending on each channel. + let channel_size = amt_msat / 1000 / num_mpp_parts as u64 * 100 / max_in_flight_percent as u64 + 100; + let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_size, 0); + let channel_id = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, channel_size, 0).0.channel_id; chan_ids.push(channel_id); } // Send the initial payment. - let amt_msat = 900_000; let skimmed_fee_msat = 20; let mut route_hints = Vec::new(); for _ in 0..num_mpp_parts { @@ -4098,7 +4104,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { // Create a new channel between C and D as A will refuse to retry on the existing one because // it just failed. - let chan_id_cd_2 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).2; + create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0); // Now retry the failed HTLC. 
nodes[0].node.process_pending_htlc_forwards(); @@ -4110,6 +4116,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { expect_pending_htlcs_forwardable!(nodes[2]); check_added_monitors(&nodes[2], 1); let cs_forward = SendEvent::from_node(&nodes[2]); + let cd_channel_used = cs_forward.msgs[0].channel_id; nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &cs_forward.msgs[0]); commitment_signed_dance!(nodes[3], nodes[2], cs_forward.commitment_msg, false, true); @@ -4129,7 +4136,7 @@ fn do_test_payment_metadata_consistency(do_reload: bool, do_modify: bool) { nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &ds_fail.update_fail_htlcs[0]); commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true); expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(), - &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd_2 }]); + &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: cd_channel_used }]); } else { expect_pending_htlcs_forwardable!(nodes[3]); expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat); @@ -4294,3 +4301,94 @@ fn peel_payment_onion_custom_tlvs() { _ => panic!() } } + +#[test] +fn test_non_strict_forwarding() { + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let mut config = test_default_channel_config(); + config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100; + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config), Some(config), Some(config)]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + // Create a routing node with two outbound channels, each of which can forward 2 payments of + // the given value. + let payment_value = 1_500_000; + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0); + let (chan_update_1, _, channel_id_1, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 4_950, 0); + let (chan_update_2, _, channel_id_2, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 5_000, 0); + + // Create a route once. + let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV) + .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap(); + let route_params = RouteParameters::from_payment_params_and_value(payment_params, payment_value); + let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap(); + + // Send 4 payments over the same route. 
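The loop that follows exercises non-strict forwarding: the forwarding node may relay over any of its channels with the next peer, not only the one named in the onion, and, as the comments below explain, it prefers the usable channel with the least outbound liquidity so larger channels stay available for later HTLCs. A minimal standalone sketch of that selection rule, with assumed field names rather than LDK's internal types:

    struct Candidate {
        channel_id: [u8; 32],
        outbound_liquidity_msat: u64,
    }

    // Among channels to the same next-hop peer, pick the one with the least
    // outbound liquidity that can still carry `amt_msat`.
    fn select_forwarding_channel(candidates: &[Candidate], amt_msat: u64) -> Option<&Candidate> {
        candidates
            .iter()
            .filter(|c| c.outbound_liquidity_msat >= amt_msat)
            .min_by_key(|c| c.outbound_liquidity_msat)
    }

    fn main() {
        // Roughly mirrors the test: two channels to the same peer, 1_500_000 msat HTLC.
        let candidates = [
            Candidate { channel_id: [1; 32], outbound_liquidity_msat: 4_950_000 },
            Candidate { channel_id: [2; 32], outbound_liquidity_msat: 5_000_000 },
        ];
        let chosen = select_forwarding_channel(&candidates, 1_500_000).unwrap();
        assert_eq!(chosen.channel_id, [1; 32]); // the smaller channel is preferred
    }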
+ for i in 0..4 { + let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(payment_value), None); + nodes[0].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + let mut send_event = SendEvent::from_event(msg_events.remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false); + + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + msg_events = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + send_event = SendEvent::from_event(msg_events.remove(0)); + // The HTLC will be forwarded over the most appropriate channel with the corresponding peer, + // applying non-strict forwarding. + // The channel with the least amount of outbound liquidity will be used to maximize the + // probability of being able to successfully forward a subsequent HTLC. + assert_eq!(send_event.msgs[0].channel_id, if i < 2 { + channel_id_1 + } else { + channel_id_2 + }); + nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]); + commitment_signed_dance!(nodes[2], nodes[1], &send_event.commitment_msg, false); + + expect_pending_htlcs_forwardable!(nodes[2]); + let events = nodes[2].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 1); + assert!(matches!(events[0], Event::PaymentClaimable { .. })); + + claim_payment_along_route( + ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) + ); + } + + // Send a 5th payment which will fail. + let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(payment_value), None); + nodes[0].node.send_payment_with_route(&route, payment_hash, + RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + check_added_monitors!(nodes[0], 1); + let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(msg_events.len(), 1); + let mut send_event = SendEvent::from_event(msg_events.remove(0)); + nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false); + + expect_pending_htlcs_forwardable!(nodes[1]); + check_added_monitors!(nodes[1], 1); + let routed_scid = route.paths[0].hops[1].short_channel_id; + let routed_channel_id = match routed_scid { + scid if scid == chan_update_1.contents.short_channel_id => channel_id_1, + scid if scid == chan_update_2.contents.short_channel_id => channel_id_2, + _ => panic!("Unexpected short channel id in route"), + }; + // The failure to forward will refer to the channel given in the onion. 
+ expect_pending_htlcs_forwardable_conditions(nodes[1].node.get_and_clear_pending_events(), + &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: routed_channel_id }]); + + let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); + let events = nodes[0].node.get_and_clear_pending_events(); + expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().blamed_scid(routed_scid)); +} diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index 4911fb24..1b75755f 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -28,6 +28,7 @@ use crate::util::ser::{VecWriter, Writeable, Writer}; use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE}; use crate::ln::wire; use crate::ln::wire::{Encode, Type}; +use crate::onion_message::async_payments::{AsyncPaymentsMessageHandler, HeldHtlcAvailable, ReleaseHeldHtlc}; use crate::onion_message::messenger::{CustomOnionMessageHandler, PendingOnionMessage, Responder, ResponseInstruction}; use crate::onion_message::offers::{OffersMessage, OffersMessageHandler}; use crate::onion_message::packet::OnionMessageContents; @@ -148,6 +149,14 @@ impl OffersMessageHandler for IgnoringMessageHandler { ResponseInstruction::NoResponse } } +impl AsyncPaymentsMessageHandler for IgnoringMessageHandler { + fn held_htlc_available( + &self, _message: HeldHtlcAvailable, _responder: Option, + ) -> ResponseInstruction { + ResponseInstruction::NoResponse + } + fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {} +} impl CustomOnionMessageHandler for IgnoringMessageHandler { type CustomMessage = Infallible; fn handle_custom_message(&self, _message: Self::CustomMessage, _responder: Option) -> ResponseInstruction { diff --git a/lightning/src/offers/mod.rs b/lightning/src/offers/mod.rs index b77eec16..e5e894e2 100644 --- a/lightning/src/offers/mod.rs +++ b/lightning/src/offers/mod.rs @@ -24,7 +24,7 @@ pub mod parse; mod payer; pub mod refund; pub(crate) mod signer; -#[allow(unused)] -pub(crate) mod static_invoice; +#[cfg(async_payments)] +pub mod static_invoice; #[cfg(test)] pub(crate) mod test_utils; diff --git a/lightning/src/offers/offer.rs b/lightning/src/offers/offer.rs index dd58c75c..253de865 100644 --- a/lightning/src/offers/offer.rs +++ b/lightning/src/offers/offer.rs @@ -665,6 +665,7 @@ impl Offer { self.contents.expects_quantity() } + #[cfg(async_payments)] pub(super) fn verify( &self, key: &ExpandedKey, secp_ctx: &Secp256k1 ) -> Result<(OfferId, Option), ()> { diff --git a/lightning/src/offers/static_invoice.rs b/lightning/src/offers/static_invoice.rs index d0846b29..69f4073e 100644 --- a/lightning/src/offers/static_invoice.rs +++ b/lightning/src/offers/static_invoice.rs @@ -15,8 +15,8 @@ use crate::ln::features::{Bolt12InvoiceFeatures, OfferFeatures}; use crate::ln::inbound_payment::ExpandedKey; use crate::ln::msgs::DecodeError; use crate::offers::invoice::{ - check_invoice_signing_pubkey, construct_payment_paths, filter_fallbacks, BlindedPathIter, - BlindedPayInfo, BlindedPayInfoIter, FallbackAddress, InvoiceTlvStream, InvoiceTlvStreamRef, + check_invoice_signing_pubkey, construct_payment_paths, filter_fallbacks, BlindedPayInfo, + FallbackAddress, InvoiceTlvStream, 
InvoiceTlvStreamRef, }; use crate::offers::invoice_macros::{invoice_accessors_common, invoice_builder_methods_common}; use crate::offers::merkle::{ @@ -26,9 +26,7 @@ use crate::offers::offer::{ Amount, Offer, OfferContents, OfferTlvStream, OfferTlvStreamRef, Quantity, }; use crate::offers::parse::{Bolt12ParseError, Bolt12SemanticError, ParsedMessage}; -use crate::util::ser::{ - HighZeroBytesDroppedBigSize, Iterable, SeekReadable, WithoutLength, Writeable, Writer, -}; +use crate::util::ser::{Iterable, SeekReadable, WithoutLength, Writeable, Writer}; use crate::util::string::PrintableString; use bitcoin::address::Address; use bitcoin::blockdata::constants::ChainHash; diff --git a/lightning/src/onion_message/async_payments.rs b/lightning/src/onion_message/async_payments.rs new file mode 100644 index 00000000..f5953c63 --- /dev/null +++ b/lightning/src/onion_message/async_payments.rs @@ -0,0 +1,152 @@ +// This file is Copyright its original authors, visible in version control +// history. +// +// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE +// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option. +// You may not use this file except in accordance with one or both of these +// licenses. + +//! Message handling for async payments. + +use crate::io; +use crate::ln::msgs::DecodeError; +use crate::onion_message::messenger::PendingOnionMessage; +use crate::onion_message::messenger::{Responder, ResponseInstruction}; +use crate::onion_message::packet::OnionMessageContents; +use crate::prelude::*; +use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer}; + +// TLV record types for the `onionmsg_tlv` TLV stream as defined in BOLT 4. +const HELD_HTLC_AVAILABLE_TLV_TYPE: u64 = 72; +const RELEASE_HELD_HTLC_TLV_TYPE: u64 = 74; + +/// A handler for an [`OnionMessage`] containing an async payments message as its payload. +/// +/// [`OnionMessage`]: crate::ln::msgs::OnionMessage +pub trait AsyncPaymentsMessageHandler { + /// Handle a [`HeldHtlcAvailable`] message. A [`ReleaseHeldHtlc`] should be returned to release + /// the held funds. + fn held_htlc_available( + &self, message: HeldHtlcAvailable, responder: Option, + ) -> ResponseInstruction; + + /// Handle a [`ReleaseHeldHtlc`] message. If authentication of the message succeeds, an HTLC + /// should be released to the corresponding payee. + fn release_held_htlc(&self, message: ReleaseHeldHtlc); + + /// Release any [`AsyncPaymentsMessage`]s that need to be sent. + /// + /// Typically, this is used for messages initiating an async payment flow rather than in response + /// to another message. + #[cfg(not(c_bindings))] + fn release_pending_messages(&self) -> Vec> { + vec![] + } + + /// Release any [`AsyncPaymentsMessage`]s that need to be sent. + /// + /// Typically, this is used for messages initiating a payment flow rather than in response to + /// another message. + #[cfg(c_bindings)] + fn release_pending_messages( + &self, + ) -> Vec<( + AsyncPaymentsMessage, + crate::onion_message::messenger::Destination, + Option, + )> { + vec![] + } +} + +/// Possible async payment messages sent and received via an [`OnionMessage`]. +/// +/// [`OnionMessage`]: crate::ln::msgs::OnionMessage +#[derive(Clone, Debug)] +pub enum AsyncPaymentsMessage { + /// An HTLC is being held upstream for the often-offline recipient, to be released via + /// [`ReleaseHeldHtlc`]. + HeldHtlcAvailable(HeldHtlcAvailable), + + /// Releases the HTLC corresponding to an inbound [`HeldHtlcAvailable`] message.
+ ReleaseHeldHtlc(ReleaseHeldHtlc), +} + +/// An HTLC destined for the recipient of this message is being held upstream. The reply path +/// accompanying this onion message should be used to send a [`ReleaseHeldHtlc`] response, which +/// will cause the upstream HTLC to be released. +#[derive(Clone, Debug)] +pub struct HeldHtlcAvailable { + /// The secret that will be used by the recipient of this message to release the held HTLC. + pub payment_release_secret: [u8; 32], +} + +/// Releases the HTLC corresponding to an inbound [`HeldHtlcAvailable`] message. +#[derive(Clone, Debug)] +pub struct ReleaseHeldHtlc { + /// Used to release the HTLC held upstream if it matches the corresponding + /// [`HeldHtlcAvailable::payment_release_secret`]. + pub payment_release_secret: [u8; 32], +} + +impl OnionMessageContents for ReleaseHeldHtlc { + fn tlv_type(&self) -> u64 { + RELEASE_HELD_HTLC_TLV_TYPE + } + fn msg_type(&self) -> &'static str { + "Release Held HTLC" + } +} + +impl_writeable_tlv_based!(HeldHtlcAvailable, { + (0, payment_release_secret, required), +}); + +impl_writeable_tlv_based!(ReleaseHeldHtlc, { + (0, payment_release_secret, required), +}); + +impl AsyncPaymentsMessage { + /// Returns whether `tlv_type` corresponds to a TLV record for async payment messages. + pub fn is_known_type(tlv_type: u64) -> bool { + match tlv_type { + HELD_HTLC_AVAILABLE_TLV_TYPE | RELEASE_HELD_HTLC_TLV_TYPE => true, + _ => false, + } + } +} + +impl OnionMessageContents for AsyncPaymentsMessage { + fn tlv_type(&self) -> u64 { + match self { + Self::HeldHtlcAvailable(_) => HELD_HTLC_AVAILABLE_TLV_TYPE, + Self::ReleaseHeldHtlc(msg) => msg.tlv_type(), + } + } + fn msg_type(&self) -> &'static str { + match &self { + Self::HeldHtlcAvailable(_) => "Held HTLC Available", + Self::ReleaseHeldHtlc(msg) => msg.msg_type(), + } + } +} + +impl Writeable for AsyncPaymentsMessage { + fn write(&self, w: &mut W) -> Result<(), io::Error> { + match self { + Self::HeldHtlcAvailable(message) => message.write(w), + Self::ReleaseHeldHtlc(message) => message.write(w), + } + } +} + +impl ReadableArgs for AsyncPaymentsMessage { + fn read(r: &mut R, tlv_type: u64) -> Result { + match tlv_type { + HELD_HTLC_AVAILABLE_TLV_TYPE => Ok(Self::HeldHtlcAvailable(Readable::read(r)?)), + RELEASE_HELD_HTLC_TLV_TYPE => Ok(Self::ReleaseHeldHtlc(Readable::read(r)?)), + _ => Err(DecodeError::InvalidValue), + } + } +} diff --git a/lightning/src/onion_message/functional_tests.rs b/lightning/src/onion_message/functional_tests.rs index 08be1b2c..40b61779 100644 --- a/lightning/src/onion_message/functional_tests.rs +++ b/lightning/src/onion_message/functional_tests.rs @@ -19,6 +19,7 @@ use crate::routing::test_utils::{add_channel, add_or_update_node}; use crate::sign::{NodeSigner, Recipient}; use crate::util::ser::{FixedLengthReader, LengthReadable, Writeable, Writer}; use crate::util::test_utils; +use super::async_payments::{AsyncPaymentsMessageHandler, HeldHtlcAvailable, ReleaseHeldHtlc}; use super::messenger::{CustomOnionMessageHandler, DefaultMessageRouter, Destination, OnionMessagePath, OnionMessenger, PendingOnionMessage, Responder, ResponseInstruction, SendError, SendSuccess}; use super::offers::{OffersMessage, OffersMessageHandler}; use super::packet::{OnionMessageContents, Packet}; @@ -50,6 +51,7 @@ struct MessengerNode { Arc >>, Arc, + Arc, Arc >, custom_message_handler: Arc, @@ -79,6 +81,17 @@ impl OffersMessageHandler for TestOffersMessageHandler { } } +struct TestAsyncPaymentsMessageHandler {} + +impl AsyncPaymentsMessageHandler for 
TestAsyncPaymentsMessageHandler { + fn held_htlc_available( + &self, _message: HeldHtlcAvailable, _responder: Option, + ) -> ResponseInstruction { + ResponseInstruction::NoResponse + } + fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {} +} + #[derive(Clone, Debug, PartialEq)] enum TestCustomMessage { Ping, @@ -249,18 +262,19 @@ fn create_nodes_using_cfgs(cfgs: Vec) -> Vec { DefaultMessageRouter::new(network_graph.clone(), entropy_source.clone()) ); let offers_message_handler = Arc::new(TestOffersMessageHandler {}); + let async_payments_message_handler = Arc::new(TestAsyncPaymentsMessageHandler {}); let custom_message_handler = Arc::new(TestCustomMessageHandler::new()); let messenger = if cfg.intercept_offline_peer_oms { OnionMessenger::new_with_offline_peer_interception( entropy_source.clone(), node_signer.clone(), logger.clone(), node_id_lookup, message_router, offers_message_handler, - custom_message_handler.clone() + async_payments_message_handler, custom_message_handler.clone() ) } else { OnionMessenger::new( entropy_source.clone(), node_signer.clone(), logger.clone(), node_id_lookup, message_router, offers_message_handler, - custom_message_handler.clone() + async_payments_message_handler, custom_message_handler.clone() ) }; nodes.push(MessengerNode { @@ -492,8 +506,9 @@ fn async_response_with_reply_path_fails() { let path_id = Some([2; 32]); let reply_path = BlindedPath::new_for_message(&[], bob.node_id, &*bob.entropy_source, &secp_ctx).unwrap(); - // Alice tries to asynchronously respond to Bob, but fails because the nodes are unannounced. - // Therefore, the reply_path cannot be used for the response. + // Alice tries to asynchronously respond to Bob, but fails because the nodes are unannounced and + // disconnected. Thus, a reply path could not be created for the response. + disconnect_peers(alice, bob); let responder = Responder::new(reply_path, path_id); alice.custom_message_handler.expect_message_and_response(message.clone()); let response_instruction = alice.custom_message_handler.handle_custom_message(message, Some(responder)); diff --git a/lightning/src/onion_message/messenger.rs b/lightning/src/onion_message/messenger.rs index 0fb72c52..21012bd7 100644 --- a/lightning/src/onion_message/messenger.rs +++ b/lightning/src/onion_message/messenger.rs @@ -24,6 +24,9 @@ use crate::ln::features::{InitFeatures, NodeFeatures}; use crate::ln::msgs::{self, OnionMessage, OnionMessageHandler, SocketAddress}; use crate::ln::onion_utils; use crate::routing::gossip::{NetworkGraph, NodeId, ReadOnlyNetworkGraph}; +use super::async_payments::AsyncPaymentsMessageHandler; +#[cfg(async_payments)] +use super::async_payments::AsyncPaymentsMessage; use super::packet::OnionMessageContents; use super::packet::ParsedOnionMessageContents; use super::offers::OffersMessageHandler; @@ -76,22 +79,27 @@ pub trait AOnionMessenger { type OffersMessageHandler: OffersMessageHandler + ?Sized; /// A type that may be dereferenced to [`Self::OffersMessageHandler`] type OMH: Deref; + /// A type implementing [`AsyncPaymentsMessageHandler`] + type AsyncPaymentsMessageHandler: AsyncPaymentsMessageHandler + ?Sized; + /// A type that may be dereferenced to [`Self::AsyncPaymentsMessageHandler`] + type APH: Deref; /// A type implementing [`CustomOnionMessageHandler`] type CustomOnionMessageHandler: CustomOnionMessageHandler + ?Sized; /// A type that may be dereferenced to [`Self::CustomOnionMessageHandler`] type CMH: Deref; /// Returns a reference to the actual [`OnionMessenger`] object.
- fn get_om(&self) -> &OnionMessenger; + fn get_om(&self) -> &OnionMessenger; } -impl AOnionMessenger -for OnionMessenger where +impl AOnionMessenger +for OnionMessenger where ES::Target: EntropySource, NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, MR::Target: MessageRouter, OMH::Target: OffersMessageHandler, + APH:: Target: AsyncPaymentsMessageHandler, CMH::Target: CustomOnionMessageHandler, { type EntropySource = ES::Target; @@ -106,9 +114,11 @@ for OnionMessenger where type MR = MR; type OffersMessageHandler = OMH::Target; type OMH = OMH; + type AsyncPaymentsMessageHandler = APH::Target; + type APH = APH; type CustomOnionMessageHandler = CMH::Target; type CMH = CMH; - fn get_om(&self) -> &OnionMessenger { self } + fn get_om(&self) -> &OnionMessenger { self } } /// A sender, receiver and forwarder of [`OnionMessage`]s. @@ -180,11 +190,12 @@ for OnionMessenger where /// # let message_router = Arc::new(FakeMessageRouter {}); /// # let custom_message_handler = IgnoringMessageHandler {}; /// # let offers_message_handler = IgnoringMessageHandler {}; +/// # let async_payments_message_handler = IgnoringMessageHandler {}; /// // Create the onion messenger. This must use the same `keys_manager` as is passed to your /// // ChannelManager. /// let onion_messenger = OnionMessenger::new( /// &keys_manager, &keys_manager, logger, &node_id_lookup, message_router, -/// &offers_message_handler, &custom_message_handler +/// &offers_message_handler, &async_payments_message_handler, &custom_message_handler /// ); /// # #[derive(Debug)] @@ -225,14 +236,16 @@ for OnionMessenger where /// /// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest /// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice -pub struct OnionMessenger -where +pub struct OnionMessenger< + ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, APH: Deref, CMH: Deref +> where ES::Target: EntropySource, NS::Target: NodeSigner, L::Target: Logger, NL::Target: NodeIdLookUp, MR::Target: MessageRouter, OMH::Target: OffersMessageHandler, + APH::Target: AsyncPaymentsMessageHandler, CMH::Target: CustomOnionMessageHandler, { entropy_source: ES, @@ -243,6 +256,8 @@ where node_id_lookup: NL, message_router: MR, offers_handler: OMH, + #[allow(unused)] + async_payments_handler: APH, custom_handler: CMH, intercept_messages_for_offline_peers: bool, pending_events: Mutex, @@ -489,7 +504,7 @@ where } fn create_blinded_paths_from_iter< - I: Iterator, + I: ExactSizeIterator, T: secp256k1::Signing + secp256k1::Verification >( &self, recipient: PublicKey, peers: I, secp_ctx: &Secp256k1, compact_paths: bool @@ -505,13 +520,20 @@ where let is_recipient_announced = network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)); + let has_one_peer = peers.len() == 1; let mut peer_info = peers - // Limit to peers with announced channels + // Limit to peers with announced channels unless the recipient is unannounced. .filter_map(|peer| network_graph .node(&NodeId::from_pubkey(&peer.node_id)) - .filter(|info| info.channels.len() >= MIN_PEER_CHANNELS) + .filter(|info| + !is_recipient_announced || info.channels.len() >= MIN_PEER_CHANNELS + ) .map(|info| (peer, info.is_tor_only(), info.channels.len())) + // Allow messages directly with the only peer when unannounced. + .or_else(|| (!is_recipient_announced && has_one_peer) + .then(|| (peer, false, 0)) + ) ) // Exclude Tor-only nodes when the recipient is announced. 
.filter(|(_, is_tor_only, _)| !(*is_tor_only && is_recipient_announced)) @@ -986,8 +1008,8 @@ where } } -impl -OnionMessenger +impl +OnionMessenger where ES::Target: EntropySource, NS::Target: NodeSigner, @@ -995,17 +1017,18 @@ where NL::Target: NodeIdLookUp, MR::Target: MessageRouter, OMH::Target: OffersMessageHandler, + APH::Target: AsyncPaymentsMessageHandler, CMH::Target: CustomOnionMessageHandler, { /// Constructs a new `OnionMessenger` to send, forward, and delegate received onion messages to /// their respective handlers. pub fn new( entropy_source: ES, node_signer: NS, logger: L, node_id_lookup: NL, message_router: MR, - offers_handler: OMH, custom_handler: CMH + offers_handler: OMH, async_payments_handler: APH, custom_handler: CMH ) -> Self { Self::new_inner( entropy_source, node_signer, logger, node_id_lookup, message_router, - offers_handler, custom_handler, false + offers_handler, async_payments_handler, custom_handler, false ) } @@ -1032,17 +1055,17 @@ where /// peers. pub fn new_with_offline_peer_interception( entropy_source: ES, node_signer: NS, logger: L, node_id_lookup: NL, - message_router: MR, offers_handler: OMH, custom_handler: CMH + message_router: MR, offers_handler: OMH, async_payments_handler: APH, custom_handler: CMH ) -> Self { Self::new_inner( entropy_source, node_signer, logger, node_id_lookup, message_router, - offers_handler, custom_handler, true + offers_handler, async_payments_handler, custom_handler, true ) } fn new_inner( entropy_source: ES, node_signer: NS, logger: L, node_id_lookup: NL, - message_router: MR, offers_handler: OMH, custom_handler: CMH, + message_router: MR, offers_handler: OMH, async_payments_handler: APH, custom_handler: CMH, intercept_messages_for_offline_peers: bool ) -> Self { let mut secp_ctx = Secp256k1::new(); @@ -1056,6 +1079,7 @@ where node_id_lookup, message_router, offers_handler, + async_payments_handler, custom_handler, intercept_messages_for_offline_peers, pending_events: Mutex::new(PendingEvents { @@ -1360,8 +1384,8 @@ fn outbound_buffer_full(peer_node_id: &PublicKey, buffer: &HashMap EventsProvider -for OnionMessenger +impl EventsProvider +for OnionMessenger where ES::Target: EntropySource, NS::Target: NodeSigner, @@ -1369,6 +1393,7 @@ where NL::Target: NodeIdLookUp, MR::Target: MessageRouter, OMH::Target: OffersMessageHandler, + APH::Target: AsyncPaymentsMessageHandler, CMH::Target: CustomOnionMessageHandler, { fn process_pending_events(&self, handler: H) where H::Target: EventHandler { @@ -1400,8 +1425,8 @@ where } } -impl OnionMessageHandler -for OnionMessenger +impl OnionMessageHandler +for OnionMessenger where ES::Target: EntropySource, NS::Target: NodeSigner, @@ -1409,6 +1434,7 @@ where NL::Target: NodeIdLookUp, MR::Target: MessageRouter, OMH::Target: OffersMessageHandler, + APH::Target: AsyncPaymentsMessageHandler, CMH::Target: CustomOnionMessageHandler, { fn handle_onion_message(&self, peer_node_id: &PublicKey, msg: &OnionMessage) { @@ -1420,18 +1446,26 @@ where "Received an onion message with path_id {:02x?} and {} reply_path: {:?}", path_id, if reply_path.is_some() { "a" } else { "no" }, message); + let responder = reply_path.map( + |reply_path| Responder::new(reply_path, path_id) + ); match message { ParsedOnionMessageContents::Offers(msg) => { - let responder = reply_path.map( - |reply_path| Responder::new(reply_path, path_id) - ); let response_instructions = self.offers_handler.handle_message(msg, responder); let _ = self.handle_onion_message_response(response_instructions); }, - 
ParsedOnionMessageContents::Custom(msg) => { - let responder = reply_path.map( - |reply_path| Responder::new(reply_path, path_id) + #[cfg(async_payments)] + ParsedOnionMessageContents::AsyncPayments(AsyncPaymentsMessage::HeldHtlcAvailable(msg)) => { + let response_instructions = self.async_payments_handler.held_htlc_available( + msg, responder ); + let _ = self.handle_onion_message_response(response_instructions); + }, + #[cfg(async_payments)] + ParsedOnionMessageContents::AsyncPayments(AsyncPaymentsMessage::ReleaseHeldHtlc(msg)) => { + self.async_payments_handler.release_held_htlc(msg); + }, + ParsedOnionMessageContents::Custom(msg) => { let response_instructions = self.custom_handler.handle_custom_message(msg, responder); let _ = self.handle_onion_message_response(response_instructions); }, @@ -1599,6 +1633,7 @@ pub type SimpleArcOnionMessenger = OnionMessenger< Arc>, Arc>>, Arc, Arc>>, Arc>, + Arc>, IgnoringMessageHandler >; @@ -1619,6 +1654,7 @@ pub type SimpleRefOnionMessenger< &'i SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L>, &'j DefaultMessageRouter<&'g NetworkGraph<&'b L>, &'b L, &'a KeysManager>, &'i SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L>, + &'i SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L>, IgnoringMessageHandler >; diff --git a/lightning/src/onion_message/mod.rs b/lightning/src/onion_message/mod.rs index 05a8b7d6..1693c5a9 100644 --- a/lightning/src/onion_message/mod.rs +++ b/lightning/src/onion_message/mod.rs @@ -21,6 +21,7 @@ //! [blinded paths]: crate::blinded_path::BlindedPath //! [`OnionMessenger`]: self::messenger::OnionMessenger +pub mod async_payments; pub mod messenger; pub mod offers; pub mod packet; diff --git a/lightning/src/onion_message/offers.rs b/lightning/src/onion_message/offers.rs index 42c69141..397f4b8a 100644 --- a/lightning/src/onion_message/offers.rs +++ b/lightning/src/onion_message/offers.rs @@ -16,6 +16,8 @@ use crate::offers::invoice_error::InvoiceError; use crate::offers::invoice_request::InvoiceRequest; use crate::offers::invoice::Bolt12Invoice; use crate::offers::parse::Bolt12ParseError; +#[cfg(async_payments)] +use crate::offers::static_invoice::StaticInvoice; use crate::onion_message::packet::OnionMessageContents; use crate::util::logger::Logger; use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer}; @@ -29,6 +31,8 @@ use crate::prelude::*; const INVOICE_REQUEST_TLV_TYPE: u64 = 64; const INVOICE_TLV_TYPE: u64 = 66; const INVOICE_ERROR_TLV_TYPE: u64 = 68; +#[cfg(async_payments)] +const STATIC_INVOICE_TLV_TYPE: u64 = 70; /// A handler for an [`OnionMessage`] containing a BOLT 12 Offers message as its payload. /// @@ -72,6 +76,10 @@ pub enum OffersMessage { /// [`Refund`]: crate::offers::refund::Refund Invoice(Bolt12Invoice), + #[cfg(async_payments)] + /// A [`StaticInvoice`] sent in response to an [`InvoiceRequest`]. + StaticInvoice(StaticInvoice), + /// An error from handling an [`OffersMessage`]. InvoiceError(InvoiceError), } @@ -80,7 +88,11 @@ impl OffersMessage { /// Returns whether `tlv_type` corresponds to a TLV record for Offers. 
pub fn is_known_type(tlv_type: u64) -> bool { match tlv_type { - INVOICE_REQUEST_TLV_TYPE | INVOICE_TLV_TYPE | INVOICE_ERROR_TLV_TYPE => true, + INVOICE_REQUEST_TLV_TYPE + | INVOICE_TLV_TYPE + | INVOICE_ERROR_TLV_TYPE => true, + #[cfg(async_payments)] + STATIC_INVOICE_TLV_TYPE => true, _ => false, } } @@ -89,6 +101,8 @@ impl OffersMessage { match tlv_type { INVOICE_REQUEST_TLV_TYPE => Ok(Self::InvoiceRequest(InvoiceRequest::try_from(bytes)?)), INVOICE_TLV_TYPE => Ok(Self::Invoice(Bolt12Invoice::try_from(bytes)?)), + #[cfg(async_payments)] + STATIC_INVOICE_TLV_TYPE => Ok(Self::StaticInvoice(StaticInvoice::try_from(bytes)?)), _ => Err(Bolt12ParseError::Decode(DecodeError::InvalidValue)), } } @@ -103,6 +117,10 @@ impl fmt::Debug for OffersMessage { OffersMessage::Invoice(message) => { write!(f, "{:?}", message.as_tlv_stream()) } + #[cfg(async_payments)] + OffersMessage::StaticInvoice(message) => { + write!(f, "{:?}", message) + } OffersMessage::InvoiceError(message) => { write!(f, "{:?}", message) } @@ -115,6 +133,8 @@ impl OnionMessageContents for OffersMessage { match self { OffersMessage::InvoiceRequest(_) => INVOICE_REQUEST_TLV_TYPE, OffersMessage::Invoice(_) => INVOICE_TLV_TYPE, + #[cfg(async_payments)] + OffersMessage::StaticInvoice(_) => STATIC_INVOICE_TLV_TYPE, OffersMessage::InvoiceError(_) => INVOICE_ERROR_TLV_TYPE, } } @@ -122,6 +142,8 @@ impl OnionMessageContents for OffersMessage { match &self { OffersMessage::InvoiceRequest(_) => "Invoice Request", OffersMessage::Invoice(_) => "Invoice", + #[cfg(async_payments)] + OffersMessage::StaticInvoice(_) => "Static Invoice", OffersMessage::InvoiceError(_) => "Invoice Error", } } @@ -132,6 +154,8 @@ impl Writeable for OffersMessage { match self { OffersMessage::InvoiceRequest(message) => message.write(w), OffersMessage::Invoice(message) => message.write(w), + #[cfg(async_payments)] + OffersMessage::StaticInvoice(message) => message.write(w), OffersMessage::InvoiceError(message) => message.write(w), } } diff --git a/lightning/src/onion_message/packet.rs b/lightning/src/onion_message/packet.rs index 75ec3cd9..fca7dd6a 100644 --- a/lightning/src/onion_message/packet.rs +++ b/lightning/src/onion_message/packet.rs @@ -17,6 +17,8 @@ use crate::blinded_path::message::{ForwardTlvs, ReceiveTlvs}; use crate::blinded_path::utils::Padding; use crate::ln::msgs::DecodeError; use crate::ln::onion_utils; +#[cfg(async_payments)] +use super::async_payments::AsyncPaymentsMessage; use super::messenger::CustomOnionMessageHandler; use super::offers::OffersMessage; use crate::crypto::streams::{ChaChaPolyReadAdapter, ChaChaPolyWriteAdapter}; @@ -128,6 +130,9 @@ pub(super) enum Payload { pub enum ParsedOnionMessageContents { /// A message related to BOLT 12 Offers. Offers(OffersMessage), + /// A message related to async payments. + #[cfg(async_payments)] + AsyncPayments(AsyncPaymentsMessage), /// A custom onion message specified by the user. 
Custom(T), } @@ -139,12 +144,16 @@ impl OnionMessageContents for ParsedOnionMessageContent fn tlv_type(&self) -> u64 { match self { &ParsedOnionMessageContents::Offers(ref msg) => msg.tlv_type(), + #[cfg(async_payments)] + &ParsedOnionMessageContents::AsyncPayments(ref msg) => msg.tlv_type(), &ParsedOnionMessageContents::Custom(ref msg) => msg.tlv_type(), } } fn msg_type(&self) -> &'static str { match self { ParsedOnionMessageContents::Offers(ref msg) => msg.msg_type(), + #[cfg(async_payments)] + ParsedOnionMessageContents::AsyncPayments(ref msg) => msg.msg_type(), ParsedOnionMessageContents::Custom(ref msg) => msg.msg_type(), } } @@ -154,6 +163,8 @@ impl Writeable for ParsedOnionMessageContents { fn write(&self, w: &mut W) -> Result<(), io::Error> { match self { ParsedOnionMessageContents::Offers(msg) => Ok(msg.write(w)?), + #[cfg(async_payments)] + ParsedOnionMessageContents::AsyncPayments(msg) => Ok(msg.write(w)?), ParsedOnionMessageContents::Custom(msg) => Ok(msg.write(w)?), } } @@ -255,6 +266,12 @@ for Payload::CustomM message = Some(ParsedOnionMessageContents::Offers(msg)); Ok(true) }, + #[cfg(async_payments)] + tlv_type if AsyncPaymentsMessage::is_known_type(tlv_type) => { + let msg = AsyncPaymentsMessage::read(msg_reader, tlv_type)?; + message = Some(ParsedOnionMessageContents::AsyncPayments(msg)); + Ok(true) + }, _ => match handler.read_custom_message(msg_type, msg_reader)? { Some(msg) => { message = Some(ParsedOnionMessageContents::Custom(msg)); diff --git a/lightning/src/routing/router.rs b/lightning/src/routing/router.rs index 9a88d913..28aaba05 100644 --- a/lightning/src/routing/router.rs +++ b/lightning/src/routing/router.rs @@ -100,16 +100,31 @@ impl> + Clone, L: Deref, ES: Deref, S: Deref, // recipient's node_id. const MIN_PEER_CHANNELS: usize = 3; + let has_one_peer = first_hops + .first() + .map(|details| details.counterparty.node_id) + .map(|node_id| first_hops + .iter() + .skip(1) + .all(|details| details.counterparty.node_id == node_id) + ) + .unwrap_or(false); + let network_graph = self.network_graph.deref().read_only(); + let is_recipient_announced = + network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient)); + let paths = first_hops.into_iter() .filter(|details| details.counterparty.features.supports_route_blinding()) .filter(|details| amount_msats <= details.inbound_capacity_msat) .filter(|details| amount_msats >= details.inbound_htlc_minimum_msat.unwrap_or(0)) .filter(|details| amount_msats <= details.inbound_htlc_maximum_msat.unwrap_or(u64::MAX)) + // Limit to peers with announced channels unless the recipient is unannounced. .filter(|details| network_graph .node(&NodeId::from_pubkey(&details.counterparty.node_id)) - .map(|node_info| node_info.channels.len() >= MIN_PEER_CHANNELS) - .unwrap_or(false) + .map(|node| !is_recipient_announced || node.channels.len() >= MIN_PEER_CHANNELS) + // Allow payments directly with the only peer when unannounced. + .unwrap_or(!is_recipient_announced && has_one_peer) ) .filter_map(|details| { let short_channel_id = match details.get_inbound_payment_scid() { @@ -1031,6 +1046,13 @@ impl Payee { } } + pub(crate) fn blinded_route_hints_mut(&mut self) -> &mut [(BlindedPayInfo, BlindedPath)] { + match self { + Self::Blinded { route_hints, .. } => &mut route_hints[..], + Self::Clear { .. } => &mut [] + } + } + fn unblinded_route_hints(&self) -> &[RouteHint] { match self { Self::Blinded { .. 
} => &[], diff --git a/lightning/src/util/ser_macros.rs b/lightning/src/util/ser_macros.rs index 740b7c12..255fd3eb 100644 --- a/lightning/src/util/ser_macros.rs +++ b/lightning/src/util/ser_macros.rs @@ -633,7 +633,7 @@ macro_rules! impl_writeable_msg { $($crate::_init_tlv_field_var!($tlvfield, $fieldty);)* $crate::decode_tlv_stream!(r, {$(($type, $tlvfield, $fieldty)),*}); Ok(Self { - $($field),*, + $($field,)* $($tlvfield),* }) } @@ -1531,4 +1531,18 @@ mod tests { fn simple_test_tlv_write() { do_simple_test_tlv_write().unwrap(); } + + #[derive(Debug, Eq, PartialEq)] + struct EmptyMsg {} + impl_writeable_msg!(EmptyMsg, {}, {}); + + #[test] + fn impl_writeable_msg_empty() { + let msg = EmptyMsg {}; + let mut encoded_msg = msg.encode(); + assert!(encoded_msg.is_empty()); + let mut encoded_msg_stream = Cursor::new(&mut encoded_msg); + let decoded_msg: EmptyMsg = Readable::read(&mut encoded_msg_stream).unwrap(); + assert_eq!(msg, decoded_msg); + } } diff --git a/lightning/src/util/test_channel_signer.rs b/lightning/src/util/test_channel_signer.rs index 2009e4d0..884acc2c 100644 --- a/lightning/src/util/test_channel_signer.rs +++ b/lightning/src/util/test_channel_signer.rs @@ -18,7 +18,7 @@ use crate::sign::ecdsa::EcdsaChannelSigner; #[allow(unused_imports)] use crate::prelude::*; -use core::cmp; +use core::{cmp, fmt}; use crate::sync::{Mutex, Arc}; #[cfg(test)] use crate::sync::MutexGuard; @@ -71,9 +71,46 @@ pub struct TestChannelSigner { /// Channel state used for policy enforcement pub state: Arc>, pub disable_revocation_policy_check: bool, - /// When `true` (the default), the signer will respond immediately with signatures. When `false`, - /// the signer will return an error indicating that it is unavailable. - pub available: Arc>, + /// Set of signer operations that are disabled. If an operation is disabled, + /// the signer will return `Err` when the corresponding method is called. 
+ pub disabled_signer_ops: Arc>>, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum SignerOp { + GetPerCommitmentPoint, + ReleaseCommitmentSecret, + ValidateHolderCommitment, + SignCounterpartyCommitment, + ValidateCounterpartyRevocation, + SignHolderCommitment, + SignJusticeRevokedOutput, + SignJusticeRevokedHtlc, + SignHolderHtlcTransaction, + SignCounterpartyHtlcTransaction, + SignClosingTransaction, + SignHolderAnchorInput, + SignChannelAnnouncementWithFundingKey, +} + +impl fmt::Display for SignerOp { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + SignerOp::GetPerCommitmentPoint => write!(f, "get_per_commitment_point"), + SignerOp::ReleaseCommitmentSecret => write!(f, "release_commitment_secret"), + SignerOp::ValidateHolderCommitment => write!(f, "validate_holder_commitment"), + SignerOp::SignCounterpartyCommitment => write!(f, "sign_counterparty_commitment"), + SignerOp::ValidateCounterpartyRevocation => write!(f, "validate_counterparty_revocation"), + SignerOp::SignHolderCommitment => write!(f, "sign_holder_commitment"), + SignerOp::SignJusticeRevokedOutput => write!(f, "sign_justice_revoked_output"), + SignerOp::SignJusticeRevokedHtlc => write!(f, "sign_justice_revoked_htlc"), + SignerOp::SignHolderHtlcTransaction => write!(f, "sign_holder_htlc_transaction"), + SignerOp::SignCounterpartyHtlcTransaction => write!(f, "sign_counterparty_htlc_transaction"), + SignerOp::SignClosingTransaction => write!(f, "sign_closing_transaction"), + SignerOp::SignHolderAnchorInput => write!(f, "sign_holder_anchor_input"), + SignerOp::SignChannelAnnouncementWithFundingKey => write!(f, "sign_channel_announcement_with_funding_key"), + } + } } impl PartialEq for TestChannelSigner { @@ -90,7 +127,7 @@ impl TestChannelSigner { inner, state, disable_revocation_policy_check: false, - available: Arc::new(Mutex::new(true)), + disabled_signer_ops: Arc::new(Mutex::new(new_hash_set())), } } @@ -104,7 +141,7 @@ impl TestChannelSigner { inner, state, disable_revocation_policy_check, - available: Arc::new(Mutex::new(true)), + disabled_signer_ops: Arc::new(Mutex::new(new_hash_set())), } } @@ -115,13 +152,16 @@ impl TestChannelSigner { self.state.lock().unwrap() } - /// Marks the signer's availability. - /// - /// When `true`, methods are forwarded to the underlying signer as normal. When `false`, some - /// methods will return `Err` indicating that the signer is unavailable. Intended to be used for - /// testing asynchronous signing. 
- pub fn set_available(&self, available: bool) { - *self.available.lock().unwrap() = available; + pub fn enable_op(&mut self, signer_op: SignerOp) { + self.disabled_signer_ops.lock().unwrap().remove(&signer_op); + } + + pub fn disable_op(&mut self, signer_op: SignerOp) { + self.disabled_signer_ops.lock().unwrap().insert(signer_op); + } + + fn is_signer_available(&self, signer_op: SignerOp) -> bool { + !self.disabled_signer_ops.lock().unwrap().contains(&signer_op) } } @@ -149,7 +189,7 @@ impl ChannelSigner for TestChannelSigner { } fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> { - if !*self.available.lock().unwrap() { + if !self.is_signer_available(SignerOp::ValidateCounterpartyRevocation) { return Err(()); } let mut state = self.state.lock().unwrap(); @@ -172,7 +212,7 @@ impl EcdsaChannelSigner for TestChannelSigner { self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx); { - if !*self.available.lock().unwrap() { + if !self.is_signer_available(SignerOp::SignCounterpartyCommitment) { return Err(()); } let mut state = self.state.lock().unwrap(); @@ -191,7 +231,7 @@ impl EcdsaChannelSigner for TestChannelSigner { } fn sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1) -> Result { - if !*self.available.lock().unwrap() { + if !self.is_signer_available(SignerOp::SignHolderCommitment) { return Err(()); } let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx); @@ -212,14 +252,14 @@ impl EcdsaChannelSigner for TestChannelSigner { } fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1) -> Result { - if !*self.available.lock().unwrap() { + if !self.is_signer_available(SignerOp::SignJusticeRevokedOutput) { return Err(()); } Ok(EcdsaChannelSigner::sign_justice_revoked_output(&self.inner, justice_tx, input, amount, per_commitment_key, secp_ctx).unwrap()) } fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1) -> Result { - if !*self.available.lock().unwrap() { + if !self.is_signer_available(SignerOp::SignJusticeRevokedHtlc) { return Err(()); } Ok(EcdsaChannelSigner::sign_justice_revoked_htlc(&self.inner, justice_tx, input, amount, per_commitment_key, htlc, secp_ctx).unwrap()) @@ -229,7 +269,7 @@ impl EcdsaChannelSigner for TestChannelSigner { &self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor, secp_ctx: &Secp256k1 ) -> Result { - if !*self.available.lock().unwrap() { + if !self.is_signer_available(SignerOp::SignHolderHtlcTransaction) { return Err(()); } let state = self.state.lock().unwrap(); @@ -265,7 +305,7 @@ impl EcdsaChannelSigner for TestChannelSigner { } fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1) -> Result { - if !*self.available.lock().unwrap() { + if !self.is_signer_available(SignerOp::SignCounterpartyHtlcTransaction) { return Err(()); } Ok(EcdsaChannelSigner::sign_counterparty_htlc_transaction(&self.inner, htlc_tx, input, amount, per_commitment_point, htlc, secp_ctx).unwrap()) @@ -284,7 +324,7 @@ impl EcdsaChannelSigner for TestChannelSigner { // As long as our minimum dust limit is enforced and is greater than our anchor output // value, an anchor output can only have an index within [0, 1]. 
assert!(anchor_tx.input[input].previous_output.vout == 0 || anchor_tx.input[input].previous_output.vout == 1); - if !*self.available.lock().unwrap() { + if !self.is_signer_available(SignerOp::SignHolderAnchorInput) { return Err(()); } EcdsaChannelSigner::sign_holder_anchor_input(&self.inner, anchor_tx, input, secp_ctx) diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 2fea6109..4b2c3c2e 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -79,6 +79,8 @@ use std::time::{SystemTime, UNIX_EPOCH}; use bitcoin::psbt::Psbt; use bitcoin::Sequence; +use super::test_channel_signer::SignerOp; + pub fn pubkey(byte: u8) -> PublicKey { let secp_ctx = Secp256k1::new(); PublicKey::from_secret_key(&secp_ctx, &privkey(byte)) @@ -545,12 +547,16 @@ pub struct TestPersister { /// /// [`ChannelMonitor`]: channelmonitor::ChannelMonitor pub offchain_monitor_updates: Mutex>>, + /// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the + /// monitor's funding outpoint here. + pub chain_sync_monitor_persistences: Mutex> } impl TestPersister { pub fn new() -> Self { Self { update_rets: Mutex::new(VecDeque::new()), offchain_monitor_updates: Mutex::new(new_hash_map()), + chain_sync_monitor_persistences: Mutex::new(VecDeque::new()) } } @@ -573,15 +579,18 @@ impl chainmonitor::Persist for ret = update_ret; } - if let Some(update) = update { + if let Some(update) = update { self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert(new_hash_set()).insert(update.update_id); + } else { + self.chain_sync_monitor_persistences.lock().unwrap().push_back(funding_txo); } ret } fn archive_persisted_channel(&self, funding_txo: OutPoint) { - // remove the channel from the offchain_monitor_updates map + // remove the channel from the offchain_monitor_updates and chain_sync_monitor_persistences. 
self.offchain_monitor_updates.lock().unwrap().remove(&funding_txo); + self.chain_sync_monitor_persistences.lock().unwrap().retain(|x| x != &funding_txo); } } @@ -1215,6 +1224,7 @@ pub struct TestKeysInterface { enforcement_states: Mutex>>>, expectations: Mutex>>, pub unavailable_signers: Mutex>, + pub unavailable_signers_ops: Mutex>>, } impl EntropySource for TestKeysInterface { @@ -1273,9 +1283,11 @@ impl SignerProvider for TestKeysInterface { fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> TestChannelSigner { let keys = self.backing.derive_channel_signer(channel_value_satoshis, channel_keys_id); let state = self.make_enforcement_state_cell(keys.commitment_seed); - let signer = TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check); - if self.unavailable_signers.lock().unwrap().contains(&channel_keys_id) { - signer.set_available(false); + let mut signer = TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check); + if let Some(ops) = self.unavailable_signers_ops.lock().unwrap().get(&channel_keys_id) { + for &op in ops { + signer.disable_op(op); + } } signer } @@ -1316,6 +1328,7 @@ impl TestKeysInterface { enforcement_states: Mutex::new(new_hash_map()), expectations: Mutex::new(None), unavailable_signers: Mutex::new(new_hash_set()), + unavailable_signers_ops: Mutex::new(new_hash_map()), } } diff --git a/rustfmt_excluded_files b/rustfmt_excluded_files index cb3d02ec..50033002 100644 --- a/rustfmt_excluded_files +++ b/rustfmt_excluded_files @@ -1,4 +1,3 @@ -./bench/benches/bench.rs ./lightning-background-processor/src/lib.rs ./lightning-block-sync/src/convert.rs ./lightning-block-sync/src/gossip.rs @@ -24,9 +23,6 @@ ./lightning-persister/src/lib.rs ./lightning-persister/src/test_utils.rs ./lightning-persister/src/utils.rs -./lightning-rapid-gossip-sync/src/error.rs -./lightning-rapid-gossip-sync/src/lib.rs -./lightning-rapid-gossip-sync/src/processing.rs ./lightning/src/blinded_path/message.rs ./lightning/src/blinded_path/mod.rs ./lightning/src/blinded_path/payment.rs