pass
elif cfg == "splicing":
pass
+ elif cfg == "async_payments":
+ pass
else:
print("Bad cfg tag: " + cfg)
assert False
RUSTFLAGS="--cfg=dual_funding" cargo test --verbose --color always -p lightning
[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
RUSTFLAGS="--cfg=splicing" cargo test --verbose --color always -p lightning
+[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
+RUSTFLAGS="--cfg=async_payments" cargo test --verbose --color always -p lightning
let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
let state = self.make_enforcement_state_cell(inner.commitment_seed);
- Ok(TestChannelSigner {
- inner,
- state,
- disable_revocation_policy_check: false,
- available: Arc::new(Mutex::new(true)),
- })
+ Ok(TestChannelSigner::new_with_revoked(inner, state, false))
}
fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
// process the now-pending HTLC forward
ext_from_hex("07", &mut test);
- // Two feerate requests to check dust exposure
- ext_from_hex("00fd00fd", &mut test);
+ // Three feerate requests to check dust exposure
+ ext_from_hex("00fd00fd00fd", &mut test);
// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: UpdateHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000)
// we respond with commitment_signed then revoke_and_ack (a weird, but valid, order)
// process the now-pending HTLC forward
ext_from_hex("07", &mut test);
- // Two feerate requests to check dust exposure
- ext_from_hex("00fd00fd", &mut test);
+ // Three feerate requests to check dust exposure
+ ext_from_hex("00fd00fd00fd", &mut test);
// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
// we respond with revoke_and_ack, then commitment_signed, then update_fail_htlc
// process the now-pending HTLC forward
ext_from_hex("07", &mut test);
- // Two feerate requests to check dust exposure
- ext_from_hex("00fd00fd", &mut test);
+ // Three feerate requests to check dust exposure
+ ext_from_hex("00fd00fd00fd", &mut test);
// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
// connect a block with one transaction of len 125
use lightning::ln::script::ShutdownScript;
use lightning::offers::invoice::UnsignedBolt12Invoice;
use lightning::offers::invoice_request::UnsignedInvoiceRequest;
+use lightning::onion_message::async_payments::{
+ AsyncPaymentsMessageHandler, HeldHtlcAvailable, ReleaseHeldHtlc,
+};
use lightning::onion_message::messenger::{
CustomOnionMessageHandler, Destination, MessageRouter, OnionMessagePath, OnionMessenger,
PendingOnionMessage, Responder, ResponseInstruction,
let node_id_lookup = EmptyNodeIdLookUp {};
let message_router = TestMessageRouter {};
let offers_msg_handler = TestOffersMessageHandler {};
+ let async_payments_msg_handler = TestAsyncPaymentsMessageHandler {};
let custom_msg_handler = TestCustomMessageHandler {};
let onion_messenger = OnionMessenger::new(
&keys_manager,
&node_id_lookup,
&message_router,
&offers_msg_handler,
+ &async_payments_msg_handler,
&custom_msg_handler,
);
}
}
+struct TestAsyncPaymentsMessageHandler {}
+
+impl AsyncPaymentsMessageHandler for TestAsyncPaymentsMessageHandler {
+ fn held_htlc_available(
+ &self, message: HeldHtlcAvailable, responder: Option<Responder>,
+ ) -> ResponseInstruction<ReleaseHeldHtlc> {
+ let responder = match responder {
+ Some(resp) => resp,
+ None => return ResponseInstruction::NoResponse,
+ };
+ responder
+ .respond(ReleaseHeldHtlc { payment_release_secret: message.payment_release_secret })
+ }
+ fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {}
+}
+
#[derive(Debug)]
struct TestCustomMessage {}
/// # type NetworkGraph = lightning::routing::gossip::NetworkGraph<Arc<Logger>>;
/// # type P2PGossipSync<UL> = lightning::routing::gossip::P2PGossipSync<Arc<NetworkGraph>, Arc<UL>, Arc<Logger>>;
/// # type ChannelManager<B, F, FE> = lightning::ln::channelmanager::SimpleArcChannelManager<ChainMonitor<B, F, FE>, B, FE, Logger>;
-/// # type OnionMessenger<B, F, FE> = lightning::onion_message::messenger::OnionMessenger<Arc<lightning::sign::KeysManager>, Arc<lightning::sign::KeysManager>, Arc<Logger>, Arc<ChannelManager<B, F, FE>>, Arc<lightning::onion_message::messenger::DefaultMessageRouter<Arc<NetworkGraph>, Arc<Logger>, Arc<lightning::sign::KeysManager>>>, Arc<ChannelManager<B, F, FE>>, lightning::ln::peer_handler::IgnoringMessageHandler>;
+/// # type OnionMessenger<B, F, FE> = lightning::onion_message::messenger::OnionMessenger<Arc<lightning::sign::KeysManager>, Arc<lightning::sign::KeysManager>, Arc<Logger>, Arc<ChannelManager<B, F, FE>>, Arc<lightning::onion_message::messenger::DefaultMessageRouter<Arc<NetworkGraph>, Arc<Logger>, Arc<lightning::sign::KeysManager>>>, Arc<ChannelManager<B, F, FE>>, lightning::ln::peer_handler::IgnoringMessageHandler, lightning::ln::peer_handler::IgnoringMessageHandler>;
/// # type Scorer = RwLock<lightning::routing::scoring::ProbabilisticScorer<Arc<NetworkGraph>, Arc<Logger>>>;
/// # type PeerManager<B, F, FE, UL> = lightning::ln::peer_handler::SimpleArcPeerManager<SocketDescriptor, ChainMonitor<B, F, FE>, B, FE, Arc<UL>, Logger>;
/// #
type PGS = Arc<P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>>;
type RGS = Arc<RapidGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>>>;
- type OM = OnionMessenger<Arc<KeysManager>, Arc<KeysManager>, Arc<test_utils::TestLogger>, Arc<ChannelManager>, Arc<DefaultMessageRouter<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>, Arc<KeysManager>>>, IgnoringMessageHandler, IgnoringMessageHandler>;
+ type OM = OnionMessenger<Arc<KeysManager>, Arc<KeysManager>, Arc<test_utils::TestLogger>, Arc<ChannelManager>, Arc<DefaultMessageRouter<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestLogger>, Arc<KeysManager>>>, IgnoringMessageHandler, Arc<ChannelManager>, IgnoringMessageHandler>;
struct Node {
node: Arc<ChannelManager>,
let best_block = BestBlock::from_network(network);
let params = ChainParameters { network, best_block };
let manager = Arc::new(ChannelManager::new(fee_estimator.clone(), chain_monitor.clone(), tx_broadcaster.clone(), router.clone(), logger.clone(), keys_manager.clone(), keys_manager.clone(), keys_manager.clone(), UserConfig::default(), params, genesis_block.header.time));
- let messenger = Arc::new(OnionMessenger::new(keys_manager.clone(), keys_manager.clone(), logger.clone(), manager.clone(), msg_router.clone(), IgnoringMessageHandler {}, IgnoringMessageHandler {}));
+ let messenger = Arc::new(OnionMessenger::new(keys_manager.clone(), keys_manager.clone(), logger.clone(), manager.clone(), msg_router.clone(), IgnoringMessageHandler {}, manager.clone(), IgnoringMessageHandler {}));
let wallet = Arc::new(TestWallet {});
let sweeper = Arc::new(OutputSweeper::new(best_block, Arc::clone(&tx_broadcaster), Arc::clone(&fee_estimator),
None::<Arc<dyn Filter + Sync + Send>>, Arc::clone(&keys_manager), wallet, Arc::clone(&kv_store), Arc::clone(&logger)));
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(rustdoc::private_intra_doc_links)]
-
#![deny(missing_docs)]
#![deny(unsafe_code)]
#![deny(non_upper_case_globals)]
#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
-#[cfg(ldk_bench)] extern crate criterion;
+#[cfg(ldk_bench)]
+extern crate criterion;
#[cfg(not(feature = "std"))]
extern crate alloc;
-#[cfg(feature = "std")]
-use std::fs::File;
use core::ops::Deref;
use core::sync::atomic::{AtomicBool, Ordering};
+#[cfg(feature = "std")]
+use std::fs::File;
use lightning::io;
use lightning::ln::msgs::{DecodeError, LightningError};
/// See [crate-level documentation] for usage.
///
/// [crate-level documentation]: crate
-pub struct RapidGossipSync<NG: Deref<Target=NetworkGraph<L>>, L: Deref>
-where L::Target: Logger {
+pub struct RapidGossipSync<NG: Deref<Target = NetworkGraph<L>>, L: Deref>
+where
+ L::Target: Logger,
+{
network_graph: NG,
logger: L,
- is_initial_sync_complete: AtomicBool
+ is_initial_sync_complete: AtomicBool,
}
-impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L::Target: Logger {
+impl<NG: Deref<Target = NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L>
+where
+ L::Target: Logger,
+{
/// Instantiate a new [`RapidGossipSync`] instance.
pub fn new(network_graph: NG, logger: L) -> Self {
- Self {
- network_graph,
- logger,
- is_initial_sync_complete: AtomicBool::new(false)
- }
+ Self { network_graph, logger, is_initial_sync_complete: AtomicBool::new(false) }
}
/// Sync gossip data from a file.
///
#[cfg(feature = "std")]
pub fn sync_network_graph_with_file_path(
- &self,
- sync_path: &str,
+ &self, sync_path: &str,
) -> Result<u32, GraphSyncError> {
let mut file = File::open(sync_path)?;
self.update_network_graph_from_byte_stream(&mut file)
///
/// `update_data`: `&[u8]` binary stream that comprises the update data
/// `current_time_unix`: `Option<u64>` optional current timestamp to verify data age
- pub fn update_network_graph_no_std(&self, update_data: &[u8], current_time_unix: Option<u64>) -> Result<u32, GraphSyncError> {
+ pub fn update_network_graph_no_std(
+ &self, update_data: &[u8], current_time_unix: Option<u64>,
+ ) -> Result<u32, GraphSyncError> {
let mut read_cursor = io::Cursor::new(update_data);
self.update_network_graph_from_byte_stream_no_std(&mut read_cursor, current_time_unix)
}
use bitcoin::Network;
+ use crate::{GraphSyncError, RapidGossipSync};
use lightning::ln::msgs::DecodeError;
use lightning::routing::gossip::NetworkGraph;
use lightning::util::test_utils::TestLogger;
- use crate::{GraphSyncError, RapidGossipSync};
#[test]
fn test_sync_from_file() {
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let start = std::time::Instant::now();
- let sync_result = rapid_sync
- .sync_network_graph_with_file_path("./res/full_graph.lngossip");
+ let sync_result = rapid_sync.sync_network_graph_with_file_path("./res/full_graph.lngossip");
if let Err(GraphSyncError::DecodeError(DecodeError::Io(io_error))) = &sync_result {
let error_string = format!("Input file lightning-rapid-gossip-sync/res/full_graph.lngossip is missing! Download it from https://bitcoin.ninja/ldk-compressed_graph-285cb27df79-2022-07-21.bin\n\n{:?}", io_error);
#[cfg(not(require_route_graph_test))]
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::secp256k1::PublicKey;
-use lightning::ln::msgs::{DecodeError, ErrorAction, LightningError, SocketAddress, UnsignedChannelUpdate, UnsignedNodeAnnouncement};
+use lightning::io;
+use lightning::ln::msgs::{
+ DecodeError, ErrorAction, LightningError, SocketAddress, UnsignedChannelUpdate,
+ UnsignedNodeAnnouncement,
+};
use lightning::routing::gossip::{NetworkGraph, NodeAlias, NodeId};
use lightning::util::logger::Logger;
-use lightning::{log_debug, log_warn, log_trace, log_given_level, log_gossip};
use lightning::util::ser::{BigSize, FixedLengthReader, Readable};
-use lightning::io;
+use lightning::{log_debug, log_given_level, log_gossip, log_trace, log_warn};
use crate::{GraphSyncError, RapidGossipSync};
use std::time::{SystemTime, UNIX_EPOCH};
#[cfg(all(not(feature = "std"), not(test)))]
-use alloc::{vec::Vec, borrow::ToOwned};
+use alloc::{borrow::ToOwned, vec::Vec};
use lightning::ln::features::NodeFeatures;
/// The purpose of this prefix is to identify the serialization format, should other rapid gossip
/// suggestion.
const STALE_RGS_UPDATE_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 14;
-impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L::Target: Logger {
+impl<NG: Deref<Target = NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L>
+where
+ L::Target: Logger,
+{
#[cfg(feature = "std")]
pub(crate) fn update_network_graph_from_byte_stream<R: io::Read>(
- &self,
- read_cursor: &mut R,
+ &self, read_cursor: &mut R,
) -> Result<u32, GraphSyncError> {
#[allow(unused_mut, unused_assignments)]
let mut current_time_unix = None;
{
// Note that many tests rely on being able to set arbitrarily old timestamps, thus we
// disable this check during tests!
- current_time_unix = Some(SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs());
+ current_time_unix = Some(
+ SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .expect("Time must be > 1970")
+ .as_secs(),
+ );
}
self.update_network_graph_from_byte_stream_no_std(read_cursor, current_time_unix)
}
pub(crate) fn update_network_graph_from_byte_stream_no_std<R: io::Read>(
- &self,
- mut read_cursor: &mut R,
- current_time_unix: Option<u64>
+ &self, mut read_cursor: &mut R, current_time_unix: Option<u64>,
) -> Result<u32, GraphSyncError> {
log_trace!(self.logger, "Processing RGS data...");
let mut protocol_prefix = [0u8; 3];
let chain_hash: ChainHash = Readable::read(read_cursor)?;
let ng_chain_hash = self.network_graph.get_chain_hash();
if chain_hash != ng_chain_hash {
- return Err(
- LightningError {
- err: "Rapid Gossip Sync data's chain hash does not match the network graph's".to_owned(),
- action: ErrorAction::IgnoreError,
- }.into()
- );
+ return Err(LightningError {
+ err: "Rapid Gossip Sync data's chain hash does not match the network graph's"
+ .to_owned(),
+ action: ErrorAction::IgnoreError,
+ }
+ .into());
}
let latest_seen_timestamp: u32 = Readable::read(read_cursor)?;
if let Some(time) = current_time_unix {
- if (latest_seen_timestamp as u64) < time.saturating_sub(STALE_RGS_UPDATE_AGE_LIMIT_SECS) {
- return Err(LightningError{err: "Rapid Gossip Sync data is more than two weeks old".to_owned(), action: ErrorAction::IgnoreError}.into());
+ if (latest_seen_timestamp as u64) < time.saturating_sub(STALE_RGS_UPDATE_AGE_LIMIT_SECS)
+ {
+ return Err(LightningError {
+ err: "Rapid Gossip Sync data is more than two weeks old".to_owned(),
+ action: ErrorAction::IgnoreError,
+ }
+ .into());
}
}
excess_data: Vec::new(),
};
- read_only_network_graph.nodes()
+ read_only_network_graph
+ .nodes()
.get(¤t_node_id)
.and_then(|node| node.announcement_info.as_ref())
.map(|info| {
let mut node_addresses: Vec<SocketAddress> = Vec::new();
for address_index in 0..address_count {
let current_byte_count: u8 = Readable::read(read_cursor)?;
- let mut address_reader = FixedLengthReader::new(&mut read_cursor, current_byte_count as u64);
+ let mut address_reader =
+ FixedLengthReader::new(&mut read_cursor, current_byte_count as u64);
if let Ok(current_address) = Readable::read(&mut address_reader) {
node_addresses.push(current_address);
if address_reader.bytes_remain() {
log_gossip!(
self.logger,
"Failure to parse address at index {} for node ID {}",
- address_index, current_node_id
+ address_index,
+ current_node_id
);
address_reader.eat_remaining()?;
}
if has_additional_data {
let additional_data: Vec<u8> = Readable::read(read_cursor)?;
- log_gossip!(self.logger, "Ignoring {} bytes of additional data in node announcement", additional_data.len());
+ log_gossip!(
+ self.logger,
+ "Ignoring {} bytes of additional data in node announcement",
+ additional_data.len()
+ );
}
}
} else {
// handle SCID
let scid_delta: BigSize = Readable::read(read_cursor)?;
- let short_channel_id = previous_scid
- .checked_add(scid_delta.0)
- .ok_or(DecodeError::InvalidValue)?;
+ let short_channel_id =
+ previous_scid.checked_add(scid_delta.0).ok_or(DecodeError::InvalidValue)?;
previous_scid = short_channel_id;
let node_id_1_index: BigSize = Readable::read(read_cursor)?;
let mut node_id_2_index: BigSize = Readable::read(read_cursor)?;
let has_additional_data = (node_id_2_index.0 & (1 << 63)) > 0;
- node_id_2_index.0 &= !(1 << 63); // ensure 63rd bit isn't set
+ // ensure 63rd bit isn't set
+ node_id_2_index.0 &= !(1 << 63);
if max(node_id_1_index.0, node_id_2_index.0) >= node_id_count as u64 {
return Err(DecodeError::InvalidValue.into());
let node_id_1 = node_ids[node_id_1_index.0 as usize];
let node_id_2 = node_ids[node_id_2_index.0 as usize];
- log_gossip!(self.logger, "Adding channel {} from RGS announcement at {}",
- short_channel_id, latest_seen_timestamp);
+ log_gossip!(
+ self.logger,
+ "Adding channel {} from RGS announcement at {}",
+ short_channel_id,
+ latest_seen_timestamp
+ );
let announcement_result = network_graph.add_channel_from_partial_announcement(
short_channel_id,
if let ErrorAction::IgnoreDuplicateGossip = lightning_error.action {
// everything is fine, just a duplicate channel announcement
} else {
- log_warn!(self.logger, "Failed to process channel announcement: {:?}", lightning_error);
+ log_warn!(
+ self.logger,
+ "Failed to process channel announcement: {:?}",
+ lightning_error
+ );
return Err(lightning_error.into());
}
}
if version >= 2 && has_additional_data {
// forwards compatibility
let additional_data: Vec<u8> = Readable::read(read_cursor)?;
- log_gossip!(self.logger, "Ignoring {} bytes of additional data in channel announcement", additional_data.len());
+ log_gossip!(
+ self.logger,
+ "Ignoring {} bytes of additional data in channel announcement",
+ additional_data.len()
+ );
}
}
for modification in node_modifications {
match network_graph.update_node_from_unsigned_announcement(&modification) {
- Ok(_) => {}
- Err(LightningError { action: ErrorAction::IgnoreDuplicateGossip, .. }) => {}
+ Ok(_) => {},
+ Err(LightningError { action: ErrorAction::IgnoreDuplicateGossip, .. }) => {},
Err(LightningError { action: ErrorAction::IgnoreAndLog(level), err }) => {
- log_given_level!(self.logger, level, "Failed to apply node announcement: {:?}", err);
- }
+ log_given_level!(
+ self.logger,
+ level,
+ "Failed to apply node announcement: {:?}",
+ err
+ );
+ },
Err(LightningError { action: ErrorAction::IgnoreError, err }) => {
log_gossip!(self.logger, "Failed to apply node announcement: {:?}", err);
- }
+ },
Err(e) => return Err(e.into()),
}
}
- previous_scid = 0; // updates start at a new scid
+ // updates start at a new scid
+ previous_scid = 0;
let update_count: u32 = Readable::read(read_cursor)?;
log_debug!(self.logger, "Processing RGS update from {} with {} nodes, {} channel announcements and {} channel updates.",
for _ in 0..update_count {
let scid_delta: BigSize = Readable::read(read_cursor)?;
- let short_channel_id = previous_scid
- .checked_add(scid_delta.0)
- .ok_or(DecodeError::InvalidValue)?;
+ let short_channel_id =
+ previous_scid.checked_add(scid_delta.0).ok_or(DecodeError::InvalidValue)?;
previous_scid = short_channel_id;
let channel_flags: u8 = Readable::read(read_cursor)?;
if scid_delta.0 == 0 && is_same_direction_update {
// this is additional data for forwards compatibility
let additional_data: Vec<u8> = Readable::read(read_cursor)?;
- log_gossip!(self.logger, "Ignoring {} bytes of additional data in channel update", additional_data.len());
+ log_gossip!(
+ self.logger,
+ "Ignoring {} bytes of additional data in channel update",
+ additional_data.len()
+ );
continue;
}
}
if (channel_flags & 0b_1000_0000) != 0 {
// incremental update, field flags will indicate mutated values
let read_only_network_graph = network_graph.read_only();
- if let Some(directional_info) =
- read_only_network_graph.channels().get(&short_channel_id)
+ if let Some(directional_info) = read_only_network_graph
+ .channels()
+ .get(&short_channel_id)
.and_then(|channel| channel.get_directional_info(channel_flags))
{
synthetic_update.cltv_expiry_delta = directional_info.cltv_expiry_delta;
synthetic_update.htlc_minimum_msat = directional_info.htlc_minimum_msat;
synthetic_update.htlc_maximum_msat = directional_info.htlc_maximum_msat;
synthetic_update.fee_base_msat = directional_info.fees.base_msat;
- synthetic_update.fee_proportional_millionths = directional_info.fees.proportional_millionths;
+ synthetic_update.fee_proportional_millionths =
+ directional_info.fees.proportional_millionths;
} else {
log_trace!(self.logger,
"Skipping application of channel update for chan {} with flags {} as original data is missing.",
continue;
}
- log_gossip!(self.logger, "Updating channel {} with flags {} from RGS announcement at {}",
- short_channel_id, channel_flags, latest_seen_timestamp);
+ log_gossip!(
+ self.logger,
+ "Updating channel {} with flags {} from RGS announcement at {}",
+ short_channel_id,
+ channel_flags,
+ latest_seen_timestamp
+ );
match network_graph.update_channel_unsigned(&synthetic_update) {
Ok(_) => {},
Err(LightningError { action: ErrorAction::IgnoreDuplicateGossip, .. }) => {},
Err(LightningError { action: ErrorAction::IgnoreAndLog(level), err }) => {
- log_given_level!(self.logger, level, "Failed to apply channel update: {:?}", err);
+ log_given_level!(
+ self.logger,
+ level,
+ "Failed to apply channel update: {:?}",
+ err
+ );
},
Err(LightningError { action: ErrorAction::IgnoreError, .. }) => {},
Err(e) => return Err(e.into()),
use crate::{GraphSyncError, RapidGossipSync};
const VALID_RGS_BINARY: [u8; 300] = [
- 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
- 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
- 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
- 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
- 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
- 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
- 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
- 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
- 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
- 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
- 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 4, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
- 0, 0, 0, 1, 0, 0, 0, 0, 29, 129, 25, 192, 255, 8, 153, 192, 0, 2, 27, 0, 0, 60, 0, 0,
- 0, 0, 0, 0, 0, 1, 0, 0, 0, 100, 0, 0, 2, 224, 0, 0, 0, 0, 58, 85, 116, 216, 0, 29, 0,
- 0, 0, 1, 0, 0, 0, 125, 0, 0, 0, 0, 58, 85, 116, 216, 255, 2, 68, 226, 0, 6, 11, 0, 1,
- 0, 0, 1,
+ 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247, 79,
+ 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218, 0, 0, 0,
+ 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251, 187, 172, 38,
+ 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125, 157, 176, 223,
+ 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136, 88, 216, 115, 11,
+ 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106, 204, 131, 186, 35,
+ 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138, 181, 64, 187, 103, 127,
+ 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175, 110, 32, 237, 0, 217, 90,
+ 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128, 76, 97, 0, 0, 0, 2, 0, 0, 255,
+ 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68, 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 4,
+ 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232, 0, 0, 0, 1, 0, 0, 0, 0, 29, 129, 25, 192,
+ 255, 8, 153, 192, 0, 2, 27, 0, 0, 60, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 100, 0, 0, 2, 224,
+ 0, 0, 0, 0, 58, 85, 116, 216, 0, 29, 0, 0, 0, 1, 0, 0, 0, 125, 0, 0, 0, 0, 58, 85, 116,
+ 216, 255, 2, 68, 226, 0, 6, 11, 0, 1, 0, 0, 1,
];
const VALID_BINARY_TIMESTAMP: u64 = 1642291930;
76, 68, 75, 2, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 102, 97, 206, 240,
0, 0, 0, 0, 2, 63, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5,
- 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, 5, 38, 4, 1,
+ 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, 5, 38, 4,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 0, 2, 3, 0, 4, 7, 1, 127, 0, 0, 1, 37, 163, 14, 5, 10, 103, 111, 111, 103, 108,
- 101, 46, 99, 111, 109, 1, 187, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5,
- 57, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 2, 23, 48, 62, 77, 75, 108, 209,
- 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201, 66,
- 51, 116, 196, 81, 167, 37, 77, 7, 102, 0, 0, 2, 25, 48, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0,
- 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
- 0, 0, 1
+ 1, 1, 1, 0, 2, 3, 0, 4, 7, 1, 127, 0, 0, 1, 37, 163, 14, 5, 10, 103, 111, 111, 103,
+ 108, 101, 46, 99, 111, 109, 1, 187, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 5, 57, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 2, 23, 48, 62, 77, 75, 108,
+ 209, 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201,
+ 66, 51, 116, 196, 81, 167, 37, 77, 7, 102, 0, 0, 2, 25, 48, 0, 0, 0, 1, 0, 0, 1, 0, 1,
+ 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 1,
];
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
{
// address node
- let node_id = NodeId::from_slice(&[3, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143]).unwrap();
+ let node_id = NodeId::from_slice(&[
+ 3, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30,
+ 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143,
+ ])
+ .unwrap();
let node = nodes.get(&node_id).unwrap();
let announcement_info = node.announcement_info.as_ref().unwrap();
let addresses = announcement_info.addresses();
{
// feature node
- let node_id = NodeId::from_slice(&[2, 77, 75, 108, 209, 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201, 66, 51, 116, 196, 81, 167, 37, 77, 7, 102]).unwrap();
+ let node_id = NodeId::from_slice(&[
+ 2, 77, 75, 108, 209, 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217,
+ 234, 216, 10, 201, 66, 51, 116, 196, 81, 167, 37, 77, 7, 102,
+ ])
+ .unwrap();
let node = nodes.get(&node_id).unwrap();
let announcement_info = node.announcement_info.as_ref().unwrap();
let features = announcement_info.features();
// assert_eq!(addresses.len(), 5);
}
- logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Failed to apply node announcement", 0);
+ logger.assert_log_contains(
+ "lightning_rapid_gossip_sync::processing",
+ "Failed to apply node announcement",
+ 0,
+ );
}
#[test]
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let example_input = vec![
- 76, 68, 75, 2, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247, 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 102, 105, 183, 240, 0, 0, 0, 0, 1, 63, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, 5, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 7, 1, 127, 0, 0, 1, 37, 163, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 57, 38, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 3, 0, 4, 14, 5, 10, 103, 111, 111, 103, 108, 101, 46, 99, 111, 109, 1, 187, 0, 2, 23, 48, 0, 0, 0, 0, 0, 0, 0, 0,
+ 76, 68, 75, 2, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+ 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 102, 105, 183,
+ 240, 0, 0, 0, 0, 1, 63, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186,
+ 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, 5, 13,
+ 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 7, 1, 127, 0, 0, 1, 37, 163, 19, 2, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 57, 38, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 3, 0, 4, 14, 5,
+ 10, 103, 111, 111, 103, 108, 101, 46, 99, 111, 109, 1, 187, 0, 2, 23, 48, 0, 0, 0, 0,
+ 0, 0, 0, 0,
];
let update_result = rapid_sync.update_network_graph_no_std(&example_input[..], None);
assert!(update_result.is_ok());
- logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Failed to apply node announcement: \"No existing channels for node_announcement\"", 1);
+ logger.assert_log_contains(
+ "lightning_rapid_gossip_sync::processing",
+ "Failed to apply node announcement: \"No existing channels for node_announcement\"",
+ 1,
+ );
}
#[test]
76, 68, 75, 2, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 102, 106, 12, 80,
1, 0, 2, 23, 48, 0, 0, 0, 3, 143, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213,
- 170, 186, 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143,
- 5, 38, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 0, 2, 3, 0, 4, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 5, 57, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 7, 1, 127, 0, 0, 1, 37, 163, 14, 5,
- 10, 103, 111, 111, 103, 108, 101, 46, 99, 111, 109, 1, 187, 0, 255, 0, 0, 0, 0, 0, 0, 0,
+ 170, 186, 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7,
+ 143, 5, 38, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 3, 0, 4, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 5, 57, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 7, 1, 127, 0, 0, 1, 37, 163,
+ 14, 5, 10, 103, 111, 111, 103, 108, 101, 46, 99, 111, 109, 1, 187, 0, 255, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 138, 77, 75, 108, 209, 54, 16,
+ 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201, 66, 51, 116,
+ 196, 81, 167, 37, 77, 7, 102, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 138, 77, 75, 108, 209, 54, 16, 50, 202,
- 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201, 66, 51, 116, 196, 81,
- 167, 37, 77, 7, 102, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 186, 83, 31, 230, 6, 129, 52, 80, 61, 39, 35, 19, 50, 39, 200,
+ 103, 172, 143, 166, 200, 60, 83, 126, 154, 68, 195, 197, 189, 189, 203, 31, 227, 55, 0,
+ 2, 22, 49, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 186, 83, 31, 230, 6, 129, 52, 80, 61, 39, 35, 19, 50, 39, 200, 103, 172, 143,
- 166, 200, 60, 83, 126, 154, 68, 195, 197, 189, 189, 203, 31, 227, 55, 0, 2, 22, 49, 0,
- 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
- 0, 0, 1, 0, 255, 128, 0, 0, 0, 0, 0, 0, 1, 0, 147, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 0, 0, 0, 1, 0, 0, 1, 0, 255, 128, 0, 0, 0, 0, 0, 0, 1, 0, 147, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
- 23, 23, 23, 23, 23, 23, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 17, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
- 42, 42, 42, 42, 42, 42, 42, 0, 1, 0, 1, 0, 17, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
- 42, 42, 42, 42, 42, 42, 42
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 17, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 0, 1, 0, 1, 0, 17, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
];
let update_result = rapid_sync.update_network_graph_no_std(&example_input[..], None);
assert!(update_result.is_ok());
- logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Ignoring 255 bytes of additional data in node announcement", 3);
- logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Ignoring 147 bytes of additional data in channel announcement", 1);
- logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Ignoring 17 bytes of additional data in channel update", 1);
+ logger.assert_log_contains(
+ "lightning_rapid_gossip_sync::processing",
+ "Ignoring 255 bytes of additional data in node announcement",
+ 3,
+ );
+ logger.assert_log_contains(
+ "lightning_rapid_gossip_sync::processing",
+ "Ignoring 147 bytes of additional data in channel announcement",
+ 1,
+ );
+ logger.assert_log_contains(
+ "lightning_rapid_gossip_sync::processing",
+ "Ignoring 17 bytes of additional data in channel update",
+ 1,
+ );
}
#[test]
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let initialization_result = rapid_sync.update_network_graph(&initialization_input[..]);
if initialization_result.is_err() {
- panic!(
- "Unexpected initialization result: {:?}",
- initialization_result
- )
+ panic!("Unexpected initialization result: {:?}", initialization_result)
}
assert_eq!(network_graph.read_only().channels().len(), 2);
0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 136, 0, 0, 0, 221, 255, 2,
68, 226, 0, 6, 11, 0, 1, 128,
];
- let update_result = rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]);
+ let update_result =
+ rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]);
if update_result.is_err() {
panic!("Unexpected update result: {:?}", update_result)
}
0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 136, 0, 0, 0, 221, 255, 2,
68, 226, 0, 6, 11, 0, 1, 128,
];
- let update_result_1 = rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]);
+ let update_result_1 =
+ rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]);
// Apply duplicate update
- let update_result_2 = rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]);
+ let update_result_2 =
+ rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]);
assert!(update_result_1.is_ok());
assert!(update_result_2.is_ok());
}
assert_eq!(network_graph.read_only().channels().len(), 0);
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
- let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_nonpruning_time));
+ let update_result = rapid_sync
+ .update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_nonpruning_time));
assert!(update_result.is_ok());
assert_eq!(network_graph.read_only().channels().len(), 2);
}
assert_eq!(network_graph.read_only().channels().len(), 0);
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
- let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_nonpruning_time + 1));
+ let update_result = rapid_sync
+ .update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_nonpruning_time + 1));
assert!(update_result.is_ok());
assert_eq!(network_graph.read_only().channels().len(), 0);
}
assert_eq!(network_graph.read_only().channels().len(), 0);
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
- let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_succeeding_time));
+ let update_result = rapid_sync
+ .update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_succeeding_time));
assert!(update_result.is_ok());
assert_eq!(network_graph.read_only().channels().len(), 0);
}
assert_eq!(network_graph.read_only().channels().len(), 0);
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
- let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(earliest_failing_time));
+ let update_result = rapid_sync
+ .update_network_graph_no_std(&VALID_RGS_BINARY, Some(earliest_failing_time));
assert!(update_result.is_err());
if let Err(GraphSyncError::LightningError(lightning_error)) = update_result {
assert_eq!(
let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(0));
assert!(update_result.is_err());
if let Err(GraphSyncError::LightningError(err)) = update_result {
- assert_eq!(err.err, "Rapid Gossip Sync data's chain hash does not match the network graph's");
+ assert_eq!(
+ err.err,
+ "Rapid Gossip Sync data's chain hash does not match the network graph's"
+ );
} else {
panic!("Unexpected update result: {:?}", update_result)
}
use core::ops::Deref;
/// An intermediate node, and possibly a short channel id leading to the next node.
-#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct ForwardNode {
/// This node's pubkey.
pub node_id: PublicKey,
// Advance the blinded onion message path by one hop, so make the second hop into the new
// introduction node.
+//
+// Will only modify `path` when returning `Ok`.
pub(crate) fn advance_path_by_one<NS: Deref, NL: Deref, T>(
path: &mut BlindedPath, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1<T>
) -> Result<(), ()>
{
let control_tlvs_ss = node_signer.ecdh(Recipient::Node, &path.blinding_point, None)?;
let rho = onion_utils::gen_rho_from_shared_secret(&control_tlvs_ss.secret_bytes());
- let encrypted_control_tlvs = path.blinded_hops.remove(0).encrypted_payload;
- let mut s = Cursor::new(&encrypted_control_tlvs);
+ let encrypted_control_tlvs = &path.blinded_hops.get(0).ok_or(())?.encrypted_payload;
+ let mut s = Cursor::new(encrypted_control_tlvs);
let mut reader = FixedLengthReader::new(&mut s, encrypted_control_tlvs.len() as u64);
match ChaChaPolyReadAdapter::read(&mut reader, rho) {
Ok(ChaChaPolyReadAdapter {
};
mem::swap(&mut path.blinding_point, &mut new_blinding_point);
path.introduction_node = IntroductionNode::NodeId(next_node_id);
+ path.blinded_hops.remove(0);
Ok(())
},
_ => Err(())
use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey};
-use crate::blinded_path::BlindedHop;
+use crate::blinded_path::{BlindedHop, BlindedPath, IntroductionNode, NodeIdLookUp};
use crate::blinded_path::utils;
+use crate::crypto::streams::ChaChaPolyReadAdapter;
use crate::io;
+use crate::io::Cursor;
use crate::ln::types::PaymentSecret;
use crate::ln::channel_state::CounterpartyForwardingInfo;
use crate::ln::features::BlindedHopFeatures;
use crate::ln::msgs::DecodeError;
+use crate::ln::onion_utils;
use crate::offers::invoice::BlindedPayInfo;
use crate::offers::invoice_request::InvoiceRequestFields;
use crate::offers::offer::OfferId;
-use crate::util::ser::{HighZeroBytesDroppedBigSize, Readable, Writeable, Writer};
+use crate::sign::{NodeSigner, Recipient};
+use crate::util::ser::{FixedLengthReader, LengthReadableArgs, HighZeroBytesDroppedBigSize, Readable, Writeable, Writer};
+
+use core::mem;
+use core::ops::Deref;
#[allow(unused_imports)]
use crate::prelude::*;
utils::construct_blinded_hops(secp_ctx, pks, tlvs, session_priv)
}
+// Advance the blinded onion payment path by one hop, so make the second hop into the new
+// introduction node.
+//
+// Will only modify `path` when returning `Ok`.
+//
+// NOTE(review): this closely mirrors the onion-message `advance_path_by_one` — consider
+// sharing one implementation to avoid the two copies diverging.
+pub(crate) fn advance_path_by_one<NS: Deref, NL: Deref, T>(
+ path: &mut BlindedPath, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1<T>
+) -> Result<(), ()>
+where
+ NS::Target: NodeSigner,
+ NL::Target: NodeIdLookUp,
+ T: secp256k1::Signing + secp256k1::Verification,
+{
+ // Derive the rho key used to decrypt the first hop's control TLVs from our node's ECDH
+ // with the path's current blinding point.
+ let control_tlvs_ss = node_signer.ecdh(Recipient::Node, &path.blinding_point, None)?;
+ let rho = onion_utils::gen_rho_from_shared_secret(&control_tlvs_ss.secret_bytes());
+ // Peek at (rather than remove) the first hop's payload so `path` stays untouched if any
+ // fallible step below errors; the hop is only popped in the success arm.
+ let encrypted_control_tlvs = &path.blinded_hops.get(0).ok_or(())?.encrypted_payload;
+ let mut s = Cursor::new(encrypted_control_tlvs);
+ let mut reader = FixedLengthReader::new(&mut s, encrypted_control_tlvs.len() as u64);
+ match ChaChaPolyReadAdapter::read(&mut reader, rho) {
+ Ok(ChaChaPolyReadAdapter {
+ readable: BlindedPaymentTlvs::Forward(ForwardTlvs { short_channel_id, .. })
+ }) => {
+ let next_node_id = match node_id_lookup.next_node_id(short_channel_id) {
+ Some(node_id) => node_id,
+ None => return Err(()),
+ };
+ let mut new_blinding_point = onion_utils::next_hop_pubkey(
+ secp_ctx, path.blinding_point, control_tlvs_ss.as_ref()
+ ).map_err(|_| ())?;
+ // All fallible work is done; only now mutate `path`, upholding the
+ // "only modify `path` when returning `Ok`" contract above.
+ mem::swap(&mut path.blinding_point, &mut new_blinding_point);
+ path.introduction_node = IntroductionNode::NodeId(next_node_id);
+ path.blinded_hops.remove(0);
+ Ok(())
+ },
+ _ => Err(())
+ }
+}
+
/// `None` if underflow occurs.
pub(crate) fn amt_to_forward_msat(inbound_amt_msat: u64, payment_relay: &PaymentRelay) -> Option<u64> {
let inbound_amt = inbound_amt_msat as u128;
use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
use core::ops::Deref;
use core::sync::atomic::{AtomicUsize, Ordering};
+use bitcoin::hashes::Hash;
use bitcoin::secp256k1::PublicKey;
/// `Persist` defines behavior for persisting channel monitors: this could mean
{
let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
let funding_outpoints = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned());
+ let channel_count = funding_outpoints.len();
for funding_outpoint in funding_outpoints.iter() {
let monitor_lock = self.monitors.read().unwrap();
if let Some(monitor_state) = monitor_lock.get(funding_outpoint) {
- if self.update_monitor_with_chain_data(header, txdata, &process, funding_outpoint, &monitor_state).is_err() {
+ if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() {
// Take the monitors lock for writing so that we poison it and any future
// operations going forward fail immediately.
core::mem::drop(monitor_lock);
let monitor_states = self.monitors.write().unwrap();
for (funding_outpoint, monitor_state) in monitor_states.iter() {
if !funding_outpoints.contains(funding_outpoint) {
- if self.update_monitor_with_chain_data(header, txdata, &process, funding_outpoint, &monitor_state).is_err() {
+ if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() {
log_error!(self.logger, "{}", err_str);
panic!("{}", err_str);
}
}
fn update_monitor_with_chain_data<FN>(
- &self, header: &Header, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint,
- monitor_state: &MonitorHolder<ChannelSigner>
+ &self, header: &Header, best_height: Option<u32>, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint,
+ monitor_state: &MonitorHolder<ChannelSigner>, channel_count: usize,
) -> Result<(), ()> where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
let monitor = &monitor_state.monitor;
let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
- let mut txn_outputs;
- {
- txn_outputs = process(monitor, txdata);
+
+ let mut txn_outputs = process(monitor, txdata);
+
+ let get_partition_key = |funding_outpoint: &OutPoint| {
+ let funding_txid_hash = funding_outpoint.txid.to_raw_hash();
+ let funding_txid_hash_bytes = funding_txid_hash.as_byte_array();
+ let funding_txid_u32 = u32::from_be_bytes([funding_txid_hash_bytes[0], funding_txid_hash_bytes[1], funding_txid_hash_bytes[2], funding_txid_hash_bytes[3]]);
+ funding_txid_u32.wrapping_add(best_height.unwrap_or_default())
+ };
+
+ let partition_factor = if channel_count < 15 {
+ 5
+ } else {
+ 50 // ~ 8hours
+ };
+
+ let has_pending_claims = monitor_state.monitor.has_pending_claims();
+ if has_pending_claims || get_partition_key(funding_outpoint) % partition_factor == 0 {
log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
match self.persister.update_persisted_channel(*funding_outpoint, None, monitor) {
ChannelMonitorUpdateStatus::Completed =>
),
ChannelMonitorUpdateStatus::InProgress => {
log_trace!(logger, "Channel Monitor sync for channel {} in progress.", log_funding_info!(monitor));
- },
+ }
ChannelMonitorUpdateStatus::UnrecoverableError => {
return Err(());
- },
+ }
}
}
#[cfg(test)]
mod tests {
- use crate::check_added_monitors;
+ use crate::{check_added_monitors, check_closed_event};
use crate::{expect_payment_path_successful, get_event_msg};
use crate::{get_htlc_update_msgs, get_revoke_commit_msgs};
use crate::chain::{ChannelMonitorUpdateStatus, Watch};
- use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+ use crate::chain::channelmonitor::ANTI_REORG_DELAY;
+ use crate::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::ChannelMessageHandler;
+ /// Test-local copy of the chain-sync persistence partition factor applied to nodes with
+ /// fewer than 15 channels; must stay in sync with the value used in
+ /// `update_monitor_with_chain_data`.
+ const CHAINSYNC_MONITOR_PARTITION_FACTOR: u32 = 5;
+
#[test]
fn test_async_ooo_offchain_updates() {
// Test that if we have multiple offchain updates being persisted and they complete
check_added_monitors!(nodes[0], 1);
}
+ #[test]
+ fn test_chainsync_triggers_distributed_monitor_persistence() {
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // Use FullBlockViaListen to avoid duplicate calls to process_chain_data and skips_blocks() in
+ // case of other connect_styles.
+ *nodes[0].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
+ *nodes[1].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
+ *nodes[2].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
+
+ let _channel_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+ let channel_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0).2;
+
+ chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+ chanmon_cfgs[1].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+ chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+
+ connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
+ connect_blocks(&nodes[1], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
+ connect_blocks(&nodes[2], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
+
+ // Connecting [`CHAINSYNC_MONITOR_PARTITION_FACTOR`] * 2 blocks should trigger only 2 writes
+ // per monitor/channel.
+ assert_eq!(2 * 2, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+ assert_eq!(2, chanmon_cfgs[1].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+ assert_eq!(2, chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+
+ // Test that monitors with pending_claims are persisted on every block.
+ // Now, close channel_2 i.e. b/w node-0 and node-2 to create pending_claim in node[0].
+ nodes[0].node.force_close_broadcasting_latest_txn(&channel_2, &nodes[2].node.get_our_node_id(), "Channel force-closed".to_string()).unwrap();
+ check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false,
+ [nodes[2].node.get_our_node_id()], 1000000);
+ check_closed_broadcast(&nodes[0], 1, true);
+ let close_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(close_tx.len(), 1);
+
+ mine_transaction(&nodes[2], &close_tx[0]);
+ check_added_monitors(&nodes[2], 1);
+ check_closed_broadcast(&nodes[2], 1, true);
+ check_closed_event!(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, false,
+ [nodes[0].node.get_our_node_id()], 1000000);
+
+ chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+ chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+
+ // For channel_2, there should be a monitor write for every block connection.
+ // We connect [`CHAINSYNC_MONITOR_PARTITION_FACTOR`] blocks since we don't know when
+ // channel_1 monitor persistence will occur, with [`CHAINSYNC_MONITOR_PARTITION_FACTOR`]
+ // it will be persisted exactly once.
+ connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR);
+ connect_blocks(&nodes[2], CHAINSYNC_MONITOR_PARTITION_FACTOR);
+
+ // CHAINSYNC_MONITOR_PARTITION_FACTOR writes for channel_2 due to pending_claim, 1 for
+ // channel_1
+ assert_eq!((CHAINSYNC_MONITOR_PARTITION_FACTOR + 1) as usize, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+ // For node[2], there is no pending_claim
+ assert_eq!(1, chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+
+ // Confirm claim for node[0] with ANTI_REORG_DELAY and reset monitor write counter.
+ mine_transaction(&nodes[0], &close_tx[0]);
+ connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+ check_added_monitors(&nodes[0], 1);
+ chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+
+ // Again connect 1 full cycle of CHAINSYNC_MONITOR_PARTITION_FACTOR blocks, it should only
+ // result in 1 write per monitor/channel.
+ connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR);
+ assert_eq!(2, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+ }
+
#[test]
#[cfg(feature = "std")]
fn update_during_chainsync_poisons_channel() {
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
+ *nodes[0].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::UnrecoverableError);
assert!(std::panic::catch_unwind(|| {
// Returning an UnrecoverableError should always panic immediately
- connect_blocks(&nodes[0], 1);
+ // Connecting [`DEFAULT_CHAINSYNC_PARTITION_FACTOR`] blocks so that we trigger some persistence
+ // after accounting for block-height based partitioning/distribution.
+ connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR);
}).is_err());
assert!(std::panic::catch_unwind(|| {
// ...and also poison our locks causing later use to panic as well
);
}
+ /// Returns true if the monitor has pending claim requests that are not fully confirmed yet.
+ pub fn has_pending_claims(&self) -> bool {
+ self.inner.lock().unwrap().onchain_tx_handler.has_pending_claims()
+ }
+
/// Triggers rebroadcasts of pending claims from a force-closed channel after a transaction
/// signature generation failure.
pub fn signer_unblocked<B: Deref, F: Deref, L: Deref>(
}
 #[cfg(test)]
- pub fn do_signer_call<F: FnMut(&Signer) -> ()>(&self, mut f: F) {
- let inner = self.inner.lock().unwrap();
- f(&inner.onchain_tx_handler.signer);
+ /// Test-only: runs `f` with mutable access to this monitor's holder signer, holding the
+ /// inner monitor lock for the duration of the call.
+ pub fn do_mut_signer_call<F: FnMut(&mut Signer) -> ()>(&self, mut f: F) {
+ let mut inner = self.inner.lock().unwrap();
+ f(&mut inner.onchain_tx_handler.signer);
 }
}
}
}
+ /// Returns true if we are currently tracking any pending claim requests that are not fully
+ /// confirmed yet.
+ pub(super) fn has_pending_claims(&self) -> bool {
+ // `!is_empty()` over `len() != 0` per clippy::len_zero.
+ !self.pending_claim_requests.is_empty()
+ }
+
/// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty
/// onchain) lays on the assumption of claim transactions getting confirmed before timelock
/// expiration (CSV or CLTV following cases). In case of high-fee spikes, claim tx may get stuck
/// If the recipient or an intermediate node misbehaves and gives us free money, this may
/// overstate the amount paid, though this is unlikely.
///
+ /// This is only `None` for payments initiated on LDK versions prior to 0.0.103.
+ ///
/// [`Route::get_total_fees`]: crate::routing::router::Route::get_total_fees
fee_paid_msat: Option<u64>,
},
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::ChannelMessageHandler;
use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
+use crate::util::test_channel_signer::SignerOp;
#[test]
fn test_async_commitment_signature_for_funding_created() {
// But! Let's make node[0]'s signer be unavailable: we should *not* broadcast a funding_created
// message...
let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &temporary_channel_id, false);
+ nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &temporary_channel_id, SignerOp::SignCounterpartyCommitment);
nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
check_added_monitors(&nodes[0], 0);
channels[0].channel_id
};
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true);
+ nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
nodes[0].node.signer_unblocked(Some((nodes[1].node.get_our_node_id(), chan_id)));
let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
// Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should
// *not* broadcast a `funding_signed`...
- nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, false);
+ nodes[1].disable_channel_signer_op(&nodes[0].node.get_our_node_id(), &temporary_channel_id, SignerOp::SignCounterpartyCommitment);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors(&nodes[1], 1);
assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len());
channels[0].channel_id
};
- nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &chan_id, true);
+ nodes[1].enable_channel_signer_op(&nodes[0].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id)));
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
// Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a
// `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`.
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, false);
+ dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg);
check_added_monitors(dst, 1);
get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id());
// Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`.
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, true);
+ dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));
let events = dst.node.get_and_clear_pending_msg_events();
// Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should
// *not* broadcast a `funding_signed`...
- nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, false);
+ nodes[1].disable_channel_signer_op(&nodes[0].node.get_our_node_id(), &temporary_channel_id, SignerOp::SignCounterpartyCommitment);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors(&nodes[1], 1);
};
// At this point, we basically expect the channel to open like a normal zero-conf channel.
- nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &chan_id, true);
+ nodes[1].enable_channel_signer_op(&nodes[0].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id)));
let (funding_signed, channel_ready_1) = {
// Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a
// `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`.
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, false);
+ dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg);
check_added_monitors(dst, 1);
reconnect_nodes(reconnect_args);
// Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`.
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, true);
+ dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));
{
route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
let error_message = "Channel force-closed";
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, false);
if remote_commitment {
// Make the counterparty broadcast its latest commitment.
check_closed_broadcast(&nodes[1], 1, true);
check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100_000);
} else {
+ nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderCommitment);
+ nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderHtlcTransaction);
// We'll connect blocks until the sender has to go onchain to time out the HTLC.
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
// Mark it as available now, we should see the signed commitment transaction.
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true);
+ nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderCommitment);
+ nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderHtlcTransaction);
get_monitor!(nodes[0], chan_id).signer_unblocked(nodes[0].tx_broadcaster, nodes[0].fee_estimator, &nodes[0].logger);
}
// Mark it as unavailable again to now test the HTLC transaction. We'll mine the commitment such
// that the HTLC transaction is retried.
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, false);
+ let sign_htlc_op = if remote_commitment {
+ SignerOp::SignCounterpartyHtlcTransaction
+ } else {
+ SignerOp::SignHolderHtlcTransaction
+ };
+ nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderCommitment);
+ nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, sign_htlc_op);
mine_transaction(&nodes[0], &commitment_tx);
check_added_monitors(&nodes[0], 1);
if anchors && !remote_commitment {
handle_bump_htlc_event(&nodes[0], 1);
}
- assert!(nodes[0].tx_broadcaster.txn_broadcast().is_empty());
+ let txn = nodes[0].tx_broadcaster.txn_broadcast();
+ assert!(txn.is_empty(), "expected no transaction to be broadcast, got {:?}", txn);
// Mark it as available now, we should see the signed HTLC transaction.
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true);
+ nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderCommitment);
+ nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, sign_htlc_op);
get_monitor!(nodes[0], chan_id).signer_unblocked(nodes[0].tx_broadcaster, nodes[0].fee_estimator, &nodes[0].logger);
if anchors && !remote_commitment {
}
 #[test]
-fn test_async_holder_signatures() {
+fn test_async_holder_signatures_no_anchors() {
 do_test_async_holder_signatures(false, false);
+}
+
+// The three tests below cover the remaining combinations of the two boolean flags
+// (presumably (anchors, remote_commitment) — confirm against the helper's signature)
+// passed to `do_test_async_holder_signatures`.
+#[test]
+fn test_async_holder_signatures_remote_commitment_no_anchors() {
 do_test_async_holder_signatures(false, true);
+}
+
+#[test]
+fn test_async_holder_signatures_anchors() {
 do_test_async_holder_signatures(true, false);
+}
+
+#[test]
+fn test_async_holder_signatures_remote_commitment_anchors() {
 do_test_async_holder_signatures(true, true);
 }
let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ // As long as the preimage isn't on-chain, we shouldn't expose the `PaymentClaimed` event to
+ // users nor send the preimage to peers in the new commitment update.
nodes[1].node.claim_funds(payment_preimage_1);
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[1], 1);
/// Returns the holder signer for this channel.
#[cfg(test)]
- pub fn get_signer(&self) -> &ChannelSignerType<SP> {
- return &self.holder_signer
+ pub fn get_mut_signer(&mut self) -> &mut ChannelSignerType<SP> {
+ return &mut self.holder_signer
}
/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
}
}
+ /// Performs checks against necessary constraints after receiving either an `accept_channel` or
+ /// `accept_channel2` message.
+ ///
+ /// Lives on `ChannelContext` (taking the message's `CommonAcceptChannelFields` rather than a
+ /// concrete message type) so the same validation can be shared by the V1 and V2
+ /// (dual-funded) channel establishment flows.
+ ///
+ /// On success, records the counterparty's negotiated parameters, advances `channel_state` to
+ /// `OUR_INIT_SENT | THEIR_INIT_SENT`, and clears `inbound_handshake_limits_override`.
+ /// Returns a channel-closing `ChannelError` if any field violates BOLT 2 requirements or the
+ /// user-configured handshake limits.
+ pub fn do_accept_channel_checks(
+ &mut self, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures,
+ common_fields: &msgs::CommonAcceptChannelFields, channel_reserve_satoshis: u64,
+ ) -> Result<(), ChannelError> {
+ let peer_limits = if let Some(ref limits) = self.inbound_handshake_limits_override { limits } else { default_limits };
+
+ // Check sanity of message fields:
+ if !self.is_outbound() {
+ return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned()));
+ }
+ if !matches!(self.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
+ return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned()));
+ }
+ if common_fields.dust_limit_satoshis > 21000000 * 100000000 {
+ return Err(ChannelError::close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", common_fields.dust_limit_satoshis)));
+ }
+ if channel_reserve_satoshis > self.channel_value_satoshis {
+ return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", channel_reserve_satoshis, self.channel_value_satoshis)));
+ }
+ if common_fields.dust_limit_satoshis > self.holder_selected_channel_reserve_satoshis {
+ return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", common_fields.dust_limit_satoshis, self.holder_selected_channel_reserve_satoshis)));
+ }
+ if channel_reserve_satoshis > self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis {
+ return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
+ channel_reserve_satoshis, self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis)));
+ }
+ let full_channel_value_msat = (self.channel_value_satoshis - channel_reserve_satoshis) * 1000;
+ if common_fields.htlc_minimum_msat >= full_channel_value_msat {
+ return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", common_fields.htlc_minimum_msat, full_channel_value_msat)));
+ }
+ let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+ if common_fields.to_self_delay > max_delay_acceptable {
+ return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, common_fields.to_self_delay)));
+ }
+ if common_fields.max_accepted_htlcs < 1 {
+ return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+ }
+ if common_fields.max_accepted_htlcs > MAX_HTLCS {
+ return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", common_fields.max_accepted_htlcs, MAX_HTLCS)));
+ }
+
+ // Now check against optional parameters as set by config...
+ if common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
+ return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
+ }
+ if common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
+ return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
+ }
+ if channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
+ return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
+ }
+ if common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
+ return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
+ }
+ if common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if common_fields.minimum_depth > peer_limits.max_minimum_depth {
+ return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, common_fields.minimum_depth)));
+ }
+
+ // If the peer echoed a channel_type it must exactly match the one we sent in open_channel.
+ if let Some(ty) = &common_fields.channel_type {
+ if *ty != self.channel_type {
+ return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
+ }
+ } else if their_features.supports_channel_type() {
+ // Assume they've accepted the channel type as they said they understand it.
+ } else {
+ let channel_type = ChannelTypeFeatures::from_init(&their_features);
+ if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+ return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+ }
+ self.channel_type = channel_type.clone();
+ self.channel_transaction_parameters.channel_type_features = channel_type;
+ }
+
+ let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+ match &common_fields.shutdown_scriptpubkey {
+ &Some(ref script) => {
+ // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
+ if script.len() == 0 {
+ None
+ } else {
+ if !script::is_bolt2_compliant(&script, their_features) {
+ return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
+ }
+ Some(script.clone())
+ }
+ },
+ // Peer is signaling upfront shutdown but didn't opt out with the correct mechanism
+ // (a.k.a. a 0-length script). The peer looks buggy; we fail the channel.
+ &None => {
+ return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+ }
+ }
+ } else { None };
+
+ self.counterparty_dust_limit_satoshis = common_fields.dust_limit_satoshis;
+ self.counterparty_max_htlc_value_in_flight_msat = cmp::min(common_fields.max_htlc_value_in_flight_msat, self.channel_value_satoshis * 1000);
+ self.counterparty_selected_channel_reserve_satoshis = Some(channel_reserve_satoshis);
+ self.counterparty_htlc_minimum_msat = common_fields.htlc_minimum_msat;
+ self.counterparty_max_accepted_htlcs = common_fields.max_accepted_htlcs;
+
+ // If the user trusts their own 0-conf funding, take the peer's minimum_depth as-is (it
+ // may be 0); otherwise require at least one confirmation.
+ if peer_limits.trust_own_funding_0conf {
+ self.minimum_depth = Some(common_fields.minimum_depth);
+ } else {
+ self.minimum_depth = Some(cmp::max(1, common_fields.minimum_depth));
+ }
+
+ let counterparty_pubkeys = ChannelPublicKeys {
+ funding_pubkey: common_fields.funding_pubkey,
+ revocation_basepoint: RevocationBasepoint::from(common_fields.revocation_basepoint),
+ payment_point: common_fields.payment_basepoint,
+ delayed_payment_basepoint: DelayedPaymentBasepoint::from(common_fields.delayed_payment_basepoint),
+ htlc_basepoint: HtlcBasepoint::from(common_fields.htlc_basepoint)
+ };
+
+ self.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
+ selected_contest_delay: common_fields.to_self_delay,
+ pubkeys: counterparty_pubkeys,
+ });
+
+ self.counterparty_cur_commitment_point = Some(common_fields.first_per_commitment_point);
+ self.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
+
+ self.channel_state = ChannelState::NegotiatingFunding(
+ NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+ );
+ self.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
+
+ Ok(())
+ }
+
/// Returns the block hash in which our funding transaction was confirmed.
pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
self.funding_tx_confirmed_in
feerate_per_kw = cmp::max(feerate_per_kw, feerate);
}
let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
- cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
+ cmp::max(feerate_per_kw.saturating_add(2530), feerate_plus_quarter.unwrap_or(u32::MAX))
}
/// Get forwarding information for the counterparty.
}
// Message handlers
- pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
- let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
-
- // Check sanity of message fields:
- if !self.context.is_outbound() {
- return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned()));
- }
- if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
- return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned()));
- }
- if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
- return Err(ChannelError::close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
- }
- if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
- return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
- }
- if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
- }
- if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
- msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
- }
- let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
- if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
- return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
- }
- let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
- if msg.common_fields.to_self_delay > max_delay_acceptable {
- return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
- }
- if msg.common_fields.max_accepted_htlcs < 1 {
- return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
- }
- if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
- return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
- }
-
- // Now check against optional parameters as set by config...
- if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
- return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
- }
- if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
- return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
- }
- if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
- return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
- }
- if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
- return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
- }
- if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
- }
- if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
- }
- if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
- return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
- }
-
- if let Some(ty) = &msg.common_fields.channel_type {
- if *ty != self.context.channel_type {
- return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
- }
- } else if their_features.supports_channel_type() {
- // Assume they've accepted the channel type as they said they understand it.
- } else {
- let channel_type = ChannelTypeFeatures::from_init(&their_features);
- if channel_type != ChannelTypeFeatures::only_static_remote_key() {
- return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
- }
- self.context.channel_type = channel_type.clone();
- self.context.channel_transaction_parameters.channel_type_features = channel_type;
- }
-
- let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
- match &msg.common_fields.shutdown_scriptpubkey {
- &Some(ref script) => {
- // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
- if script.len() == 0 {
- None
- } else {
- if !script::is_bolt2_compliant(&script, their_features) {
- return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
- }
- Some(script.clone())
- }
- },
- // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
- &None => {
- return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
- }
- }
- } else { None };
-
- self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
- self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
- self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
- self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
- self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
-
- if peer_limits.trust_own_funding_0conf {
- self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
- } else {
- self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
- }
-
- let counterparty_pubkeys = ChannelPublicKeys {
- funding_pubkey: msg.common_fields.funding_pubkey,
- revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
- payment_point: msg.common_fields.payment_basepoint,
- delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
- htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
- };
-
- self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
- selected_contest_delay: msg.common_fields.to_self_delay,
- pubkeys: counterparty_pubkeys,
- });
-
- self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
- self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
-
- self.context.channel_state = ChannelState::NegotiatingFunding(
- NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
- );
- self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
-
- Ok(())
+ /// Handles an `accept_channel` message from the remote end.
+ ///
+ /// All BOLT 2 / user-limit validation and state updates now live in
+ /// `ChannelContext::do_accept_channel_checks`, so they can be shared with the
+ /// `accept_channel2` (dual-funded) flow; this is a thin delegating wrapper.
+ pub fn accept_channel(
+ &mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits,
+ their_features: &InitFeatures
+ ) -> Result<(), ChannelError> {
+ self.context.do_accept_channel_checks(default_limits, their_features, &msg.common_fields, msg.channel_reserve_satoshis)
}
/// Handles a funding_signed message from the remote end.
use crate::offers::offer::{Offer, OfferBuilder};
use crate::offers::parse::Bolt12SemanticError;
use crate::offers::refund::{Refund, RefundBuilder};
+use crate::onion_message::async_payments::{AsyncPaymentsMessage, HeldHtlcAvailable, ReleaseHeldHtlc, AsyncPaymentsMessageHandler};
use crate::onion_message::messenger::{new_pending_onion_message, Destination, MessageRouter, PendingOnionMessage, Responder, ResponseInstruction};
use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
use crate::sign::{EntropySource, NodeSigner, Recipient, SignerProvider};
self.pending_outbound_payments
.send_payment_for_bolt12_invoice(
invoice, payment_id, &self.router, self.list_usable_channels(),
- || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer,
- best_block_height, &self.logger, &self.pending_events,
+ || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self,
+ &self.secp_ctx, best_block_height, &self.logger, &self.pending_events,
|args| self.send_payment_along_path(args)
)
}
if short_chan_id != 0 {
let mut forwarding_counterparty = None;
macro_rules! forwarding_channel_not_found {
- () => {
- for forward_info in pending_forwards.drain(..) {
+ ($forward_infos: expr) => {
+ for forward_info in $forward_infos {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
Some((cp_id, chan_id)) => (cp_id, chan_id),
None => {
- forwarding_channel_not_found!();
+ forwarding_channel_not_found!(pending_forwards.drain(..));
continue;
}
};
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() {
- forwarding_channel_not_found!();
+ forwarding_channel_not_found!(pending_forwards.drain(..));
continue;
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
- let logger = WithChannelContext::from(&self.logger, &chan.context, None);
- for forward_info in pending_forwards.drain(..) {
- let queue_fail_htlc_res = match forward_info {
- HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
- prev_user_channel_id, forward_info: PendingHTLCInfo {
- incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
- routing: PendingHTLCRouting::Forward {
- onion_packet, blinded, ..
- }, skimmed_fee_msat, ..
+ let mut draining_pending_forwards = pending_forwards.drain(..);
+ while let Some(forward_info) = draining_pending_forwards.next() {
+ let queue_fail_htlc_res = match forward_info {
+ HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
+ incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
+ routing: PendingHTLCRouting::Forward {
+ ref onion_packet, blinded, ..
+ }, skimmed_fee_msat, ..
+ },
+ }) => {
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ // Phantom payments are only PendingHTLCRouting::Receive.
+ phantom_shared_secret: None,
+ blinded_failure: blinded.map(|b| b.failure),
+ });
+ let next_blinding_point = blinded.and_then(|b| {
+ let encrypted_tlvs_ss = self.node_signer.ecdh(
+ Recipient::Node, &b.inbound_blinding_point, None
+ ).unwrap().secret_bytes();
+ onion_utils::next_hop_pubkey(
+ &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
+ ).ok()
+ });
+
+ // Forward the HTLC over the most appropriate channel with the corresponding peer,
+ // applying non-strict forwarding.
+ // The channel with the least amount of outbound liquidity will be used to maximize the
+ // probability of being able to successfully forward a subsequent HTLC.
+ let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase {
+ ChannelPhase::Funded(chan) => {
+ let balances = chan.context.get_available_balances(&self.fee_estimator);
+ if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat &&
+ outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat &&
+ chan.context.is_usable() {
+ Some((chan, balances))
+ } else {
+ None
+ }
},
- }) => {
- let logger = WithChannelContext::from(&self.logger, &chan.context, Some(payment_hash));
- log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
- let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- user_channel_id: Some(prev_user_channel_id),
- channel_id: prev_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- // Phantom payments are only PendingHTLCRouting::Receive.
- phantom_shared_secret: None,
- blinded_failure: blinded.map(|b| b.failure),
- });
- let next_blinding_point = blinded.and_then(|b| {
- let encrypted_tlvs_ss = self.node_signer.ecdh(
- Recipient::Node, &b.inbound_blinding_point, None
- ).unwrap().secret_bytes();
- onion_utils::next_hop_pubkey(
- &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
- ).ok()
- });
- if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat,
- payment_hash, outgoing_cltv_value, htlc_source.clone(),
- onion_packet, skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
- &&logger)
- {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
+ _ => None,
+ }).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c);
+ let optimal_channel = match maybe_optimal_channel {
+ Some(chan) => chan,
+ None => {
+ // Fall back to the specified channel to return an appropriate error.
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ chan
} else {
- panic!("Stated return value requirements in send_htlc() were not met");
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
}
+ }
+ };
+
+ let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash));
+ let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) {
+ "specified"
+ } else {
+ "alternate"
+ };
+ log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
+ prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
+ if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat,
+ payment_hash, outgoing_cltv_value, htlc_source.clone(),
+ onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
+ &&logger)
+ {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg);
+ } else {
+ panic!("Stated return value requirements in send_htlc() were not met");
+ }
+
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan);
failed_forwards.push((htlc_source, payment_hash,
HTLCFailReason::reason(failure_code, data),
HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
));
- continue;
+ } else {
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
}
- None
- },
- HTLCForwardInfo::AddHTLC { .. } => {
- panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
- },
- HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+ }
+ None
+ },
+ HTLCForwardInfo::AddHTLC { .. } => {
+ panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+ },
+ HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
- Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
- },
- HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
+ } else {
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
+ }
+ },
+ HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
let res = chan.queue_fail_malformed_htlc(
htlc_id, failure_code, sha256_of_onion, &&logger
);
Some((res, htlc_id))
- },
- };
- if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
- if let Err(e) = queue_fail_htlc_res {
- if let ChannelError::Ignore(msg) = e {
+ } else {
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
+ }
+ },
+ };
+ if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+ if let Err(e) = queue_fail_htlc_res {
+ if let ChannelError::Ignore(msg) = e {
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
- } else {
- panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
}
- // fail-backs are best-effort, we probably already have one
- // pending, and if not that's OK, if not, the channel is on
- // the chain and sending the HTLC-Timeout is their problem.
- continue;
+ } else {
+ panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
}
+ // fail-backs are best-effort, we probably already have one
+ // pending, and if not that's OK, if not, the channel is on
+ // the chain and sending the HTLC-Timeout is their problem.
+ continue;
}
}
- } else {
- forwarding_channel_not_found!();
- continue;
}
} else {
'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
}
if valid_mpp {
for htlc in sources.drain(..) {
- let prev_hop_chan_id = htlc.prev_hop.channel_id;
- if let Err((pk, err)) = self.claim_funds_from_hop(
+ self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
|_, definitely_duplicate| {
debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })
}
- ) {
- if let msgs::ErrorAction::IgnoreError = err.err.action {
- // We got a temporary failure updating monitor, but will claim the
- // HTLC when the monitor updating is restored (or on chain).
- let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id), Some(payment_hash));
- log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
- } else { errs.push((pk, err)); }
- }
+ );
}
}
if !valid_mpp {
}
}
- fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(&self,
- prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
- -> Result<(), (PublicKey, MsgHandleErrInternal)> {
+ fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(
+ &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
+ completion_action: ComplFunc,
+ ) {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
// If we haven't yet run background events assume we're still deserializing and shouldn't
let action = if let Some(action) = completion_action(None, true) {
action
} else {
- return Ok(());
+ return;
};
mem::drop(peer_state_lock);
} else {
debug_assert!(false,
"Duplicate claims should always free another channel immediately");
- return Ok(());
+ return;
};
if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
let mut peer_state = peer_state_mtx.lock().unwrap();
}
}
}
- return Ok(());
+ return;
}
}
}
// generally always allowed to be duplicative (and it's specifically noted in
// `PaymentForwarded`).
self.handle_monitor_update_completion_actions(completion_action(None, false));
- Ok(())
}
fn finalize_claims(&self, sources: Vec<HTLCSource>) {
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
#[cfg(debug_assertions)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
- let res = self.claim_funds_from_hop(hop_data, payment_preimage,
+ self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release =
if let Some(node_id) = next_channel_counterparty_node_id {
})
}
});
- if let Err((pk, err)) = res {
- let result: Result<(), _> = Err(err);
- let _ = handle_error!(self, result, pk);
- }
},
}
}
},
}
},
+ #[cfg(async_payments)]
+ OffersMessage::StaticInvoice(_invoice) => {
+ match responder {
+ Some(responder) => {
+ responder.respond(OffersMessage::InvoiceError(
+ InvoiceError::from_string("Static invoices not yet supported".to_string())
+ ))
+ },
+ None => return ResponseInstruction::NoResponse,
+ }
+ },
OffersMessage::InvoiceError(invoice_error) => {
log_trace!(self.logger, "Received invoice_error: {}", invoice_error);
ResponseInstruction::NoResponse
}
}
+// Stub implementation of the async-payments message handler: every handler is a
+// no-op, since async payments are not yet supported (see the cfg(async_payments)
+// gating elsewhere in this patch). Messages are received and silently dropped.
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+AsyncPaymentsMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, L>
+where
+	M::Target: chain::Watch<<SP::Target as SignerProvider>::EcdsaSigner>,
+	T::Target: BroadcasterInterface,
+	ES::Target: EntropySource,
+	NS::Target: NodeSigner,
+	SP::Target: SignerProvider,
+	F::Target: FeeEstimator,
+	R::Target: Router,
+	L::Target: Logger,
+{
+	// Ignores the notification and never responds, regardless of `_responder`.
+	fn held_htlc_available(
+		&self, _message: HeldHtlcAvailable, _responder: Option<Responder>
+	) -> ResponseInstruction<ReleaseHeldHtlc> {
+		ResponseInstruction::NoResponse
+	}
+
+	// No-op: release requests are dropped.
+	fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {}
+
+	// This stub never queues outbound async-payments messages.
+	fn release_pending_messages(&self) -> Vec<PendingOnionMessage<AsyncPaymentsMessage>> {
+		Vec::new()
+	}
+}
+
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
NodeIdLookUp for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
use crate::util::logger::Logger;
use crate::util::scid_utils;
use crate::util::test_channel_signer::TestChannelSigner;
+#[cfg(test)]
+use crate::util::test_channel_signer::SignerOp;
use crate::util::test_utils;
use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface};
use crate::util::ser::{ReadableArgs, Writeable};
&'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>,
&'node_cfg test_utils::TestMessageRouter<'chan_mon_cfg>,
&'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>,
+ &'chan_man TestChannelManager<'node_cfg, 'chan_mon_cfg>,
IgnoringMessageHandler,
>;
pub fn get_block_header(&self, height: u32) -> Header {
self.blocks.lock().unwrap()[height as usize].0.header
}
- /// Changes the channel signer's availability for the specified peer and channel.
+
+ /// Toggles this node's signer to be available for the given signer operation.
+ /// This is useful for testing behavior for restoring an async signer that previously
+ /// could not return a signature immediately.
+ #[cfg(test)]
+ pub fn enable_channel_signer_op(&self, peer_id: &PublicKey, chan_id: &ChannelId, signer_op: SignerOp) {
+ self.set_channel_signer_ops(peer_id, chan_id, signer_op, true);
+ }
+
+ /// Toggles this node's signer to be unavailable, returning `Err` for the given signer operation.
+ /// This is useful for testing behavior for an async signer that cannot return a signature
+ /// immediately.
+ #[cfg(test)]
+ pub fn disable_channel_signer_op(&self, peer_id: &PublicKey, chan_id: &ChannelId, signer_op: SignerOp) {
+ self.set_channel_signer_ops(peer_id, chan_id, signer_op, false);
+ }
+
+ /// Changes the channel signer's availability for the specified peer, channel, and signer
+ /// operation.
///
- /// When `available` is set to `true`, the channel signer will behave normally. When set to
- /// `false`, the channel signer will act like an off-line remote signer and will return `Err` for
- /// several of the signing methods. Currently, only `get_per_commitment_point` and
- /// `release_commitment_secret` are affected by this setting.
+ /// For the specified signer operation, when `available` is set to `true`, the channel signer
+	/// will behave normally, returning `Ok`. When set to `false`, the channel signer will
+ /// act like an off-line remote signer, returning `Err`. This applies to the signer in all
+ /// relevant places, i.e. the channel manager, chain monitor, and the keys manager.
#[cfg(test)]
- pub fn set_channel_signer_available(&self, peer_id: &PublicKey, chan_id: &ChannelId, available: bool) {
+ fn set_channel_signer_ops(&self, peer_id: &PublicKey, chan_id: &ChannelId, signer_op: SignerOp, available: bool) {
use crate::sign::ChannelSigner;
log_debug!(self.logger, "Setting channel signer for {} as available={}", chan_id, available);
let per_peer_state = self.node.per_peer_state.read().unwrap();
- let chan_lock = per_peer_state.get(peer_id).unwrap().lock().unwrap();
+ let mut chan_lock = per_peer_state.get(peer_id).unwrap().lock().unwrap();
let mut channel_keys_id = None;
- if let Some(chan) = chan_lock.channel_by_id.get(chan_id).map(|phase| phase.context()) {
- chan.get_signer().as_ecdsa().unwrap().set_available(available);
+ if let Some(chan) = chan_lock.channel_by_id.get_mut(chan_id).map(|phase| phase.context_mut()) {
+ let signer = chan.get_mut_signer().as_mut_ecdsa().unwrap();
+ if available {
+ signer.enable_op(signer_op);
+ } else {
+ signer.disable_op(signer_op);
+ }
channel_keys_id = Some(chan.channel_keys_id);
}
- let mut monitor = None;
- for (funding_txo, channel_id) in self.chain_monitor.chain_monitor.list_monitors() {
- if *chan_id == channel_id {
- monitor = self.chain_monitor.chain_monitor.get_monitor(funding_txo).ok();
- }
- }
+ let monitor = self.chain_monitor.chain_monitor.list_monitors().into_iter()
+ .find(|(_, channel_id)| *channel_id == *chan_id)
+ .and_then(|(funding_txo, _)| self.chain_monitor.chain_monitor.get_monitor(funding_txo).ok());
if let Some(monitor) = monitor {
- monitor.do_signer_call(|signer| {
+ monitor.do_mut_signer_call(|signer| {
channel_keys_id = channel_keys_id.or(Some(signer.inner.channel_keys_id()));
- signer.set_available(available)
+ if available {
+ signer.enable_op(signer_op);
+ } else {
+ signer.disable_op(signer_op);
+ }
});
}
+ let channel_keys_id = channel_keys_id.unwrap();
+ let mut unavailable_signers_ops = self.keys_manager.unavailable_signers_ops.lock().unwrap();
+ let entry = unavailable_signers_ops.entry(channel_keys_id).or_insert(new_hash_set());
if available {
- self.keys_manager.unavailable_signers.lock().unwrap()
- .remove(channel_keys_id.as_ref().unwrap());
+ entry.remove(&signer_op);
+ if entry.is_empty() {
+ unavailable_signers_ops.remove(&channel_keys_id);
+ }
} else {
- self.keys_manager.unavailable_signers.lock().unwrap()
- .insert(channel_keys_id.unwrap());
- }
+ entry.insert(signer_op);
+ };
}
}
let dedicated_entropy = DedicatedEntropy(RandomBytes::new([i as u8; 32]));
let onion_messenger = OnionMessenger::new(
dedicated_entropy, cfgs[i].keys_manager, cfgs[i].logger, &chan_mgrs[i],
- &cfgs[i].message_router, &chan_mgrs[i], IgnoringMessageHandler {},
+ &cfgs[i].message_router, &chan_mgrs[i], &chan_mgrs[i], IgnoringMessageHandler {},
);
let gossip_sync = P2PGossipSync::new(cfgs[i].network_graph.as_ref(), None, cfgs[i].logger);
let wallet_source = Arc::new(test_utils::TestWalletSource::new(SecretKey::from_slice(&[i as u8 + 1; 32]).unwrap()));
use crate::io_extras::sink;
use crate::prelude::*;
-use core::ops::Deref;
+use bitcoin::absolute::LockTime as AbsoluteLockTime;
use bitcoin::amount::Amount;
use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
use bitcoin::consensus::Encodable;
use bitcoin::policy::MAX_STANDARD_TX_WEIGHT;
use bitcoin::transaction::Version;
-use bitcoin::{
- absolute::LockTime as AbsoluteLockTime, OutPoint, ScriptBuf, Sequence, Transaction, TxIn,
- TxOut, Weight,
-};
+use bitcoin::{OutPoint, ScriptBuf, Sequence, Transaction, TxIn, TxOut, Weight};
use crate::chain::chaininterface::fee_for_weight;
use crate::events::bump_transaction::{BASE_INPUT_WEIGHT, EMPTY_SCRIPT_SIG_WEIGHT};
use crate::sign::{EntropySource, P2TR_KEY_PATH_WITNESS_WEIGHT, P2WPKH_WITNESS_WEIGHT};
use crate::util::ser::TransactionU16LenLimited;
+use core::ops::Deref;
+
/// The number of received `tx_add_input` messages during a negotiation at which point the
/// negotiation MUST be failed.
const MAX_RECEIVED_TX_ADD_INPUT_COUNT: u16 = 4096;
InsufficientFees,
OutputsValueExceedsInputsValue,
InvalidTx,
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub(crate) struct InteractiveTxInput {
- serial_id: SerialId,
- input: TxIn,
- prev_output: TxOut,
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub(crate) struct InteractiveTxOutput {
- serial_id: SerialId,
- tx_out: TxOut,
+ /// No funding (shared) output found.
+ MissingFundingOutput,
+ /// More than one funding (shared) output found.
+ DuplicateFundingOutput,
+	/// The intended local part of the funding output is higher than the actual shared funding output.
+	/// If the funding output is provided by the peer, this is an interop error;
+	/// if it is provided by the same node, it is an internal input-consistency error.
+ InvalidLowFundingOutputValue,
}
#[derive(Debug, Clone, PartialEq, Eq)]
let local_inputs_value_satoshis = context
.inputs
.iter()
- .filter(|(serial_id, _)| {
- !is_serial_id_valid_for_counterparty(context.holder_is_initiator, serial_id)
- })
- .fold(0u64, |value, (_, input)| value.saturating_add(input.prev_output.value.to_sat()));
+ .fold(0u64, |value, (_, input)| value.saturating_add(input.local_value()));
let local_outputs_value_satoshis = context
.outputs
.iter()
- .filter(|(serial_id, _)| {
- !is_serial_id_valid_for_counterparty(context.holder_is_initiator, serial_id)
- })
- .fold(0u64, |value, (_, output)| value.saturating_add(output.tx_out.value.to_sat()));
+ .fold(0u64, |value, (_, output)| value.saturating_add(output.local_value()));
Self {
holder_is_initiator: context.holder_is_initiator,
}
pub fn weight(&self) -> Weight {
- let inputs_weight = self.inputs.iter().fold(
- Weight::from_wu(0),
- |weight, InteractiveTxInput { prev_output, .. }| {
- weight.checked_add(estimate_input_weight(prev_output)).unwrap_or(Weight::MAX)
- },
- );
- let outputs_weight = self.outputs.iter().fold(
- Weight::from_wu(0),
- |weight, InteractiveTxOutput { tx_out, .. }| {
- weight.checked_add(get_output_weight(&tx_out.script_pubkey)).unwrap_or(Weight::MAX)
- },
- );
+ let inputs_weight = self.inputs.iter().fold(Weight::from_wu(0), |weight, input| {
+ weight.checked_add(estimate_input_weight(input.prev_output())).unwrap_or(Weight::MAX)
+ });
+ let outputs_weight = self.outputs.iter().fold(Weight::from_wu(0), |weight, output| {
+ weight.checked_add(get_output_weight(&output.script_pubkey())).unwrap_or(Weight::MAX)
+ });
Weight::from_wu(TX_COMMON_FIELDS_WEIGHT)
.checked_add(inputs_weight)
.and_then(|weight| weight.checked_add(outputs_weight))
// Inputs and outputs must be sorted by serial_id
let ConstructedTransaction { mut inputs, mut outputs, .. } = self;
- inputs.sort_unstable_by_key(|InteractiveTxInput { serial_id, .. }| *serial_id);
- outputs.sort_unstable_by_key(|InteractiveTxOutput { serial_id, .. }| *serial_id);
+ inputs.sort_unstable_by_key(|input| input.serial_id());
+ outputs.sort_unstable_by_key(|output| output.serial_id);
- let input: Vec<TxIn> =
- inputs.into_iter().map(|InteractiveTxInput { input, .. }| input).collect();
+ let input: Vec<TxIn> = inputs.into_iter().map(|input| input.txin().clone()).collect();
let output: Vec<TxOut> =
- outputs.into_iter().map(|InteractiveTxOutput { tx_out, .. }| tx_out).collect();
+ outputs.into_iter().map(|output| output.tx_out().clone()).collect();
Transaction { version: Version::TWO, lock_time: self.lock_time, input, output }
}
received_tx_add_input_count: u16,
received_tx_add_output_count: u16,
inputs: HashMap<SerialId, InteractiveTxInput>,
+ /// The output script intended to be the new funding output script.
+ /// The script pubkey is used to determine which output is the funding output.
+ /// When an output with the same script pubkey is added by any of the nodes, it will be
+ /// treated as the shared output.
+ /// The value is the holder's intended contribution to the shared funding output.
+ /// The rest is the counterparty's contribution.
+ /// When the funding output is added (recognized by its output script pubkey), it will be marked
+ /// as shared, and split between the peers according to the local value.
+ /// If the local value is found to be larger than the actual funding output, an error is generated.
+ expected_shared_funding_output: (ScriptBuf, u64),
+ /// The actual new funding output, set only after the output has actually been added.
+ /// NOTE: this output is also included in `outputs`.
+ actual_new_funding_output: Option<SharedOwnedOutput>,
prevtx_outpoints: HashSet<OutPoint>,
+ /// The outputs added so far.
outputs: HashMap<SerialId, InteractiveTxOutput>,
+ /// The locktime of the funding transaction.
tx_locktime: AbsoluteLockTime,
+ /// The fee rate used for the transaction
feerate_sat_per_kw: u32,
}
}
impl NegotiationContext {
+	/// Constructs a fresh negotiation context with empty input/output maps and
+	/// zeroed received-message counters. `expected_shared_funding_output` is the
+	/// script pubkey used to recognize the shared funding output, paired with the
+	/// holder's intended contribution to it; `actual_new_funding_output` stays
+	/// `None` until that output is actually added.
+	fn new(
+		holder_is_initiator: bool, expected_shared_funding_output: (ScriptBuf, u64),
+		tx_locktime: AbsoluteLockTime, feerate_sat_per_kw: u32,
+	) -> Self {
+		NegotiationContext {
+			holder_is_initiator,
+			received_tx_add_input_count: 0,
+			received_tx_add_output_count: 0,
+			inputs: new_hash_map(),
+			expected_shared_funding_output,
+			actual_new_funding_output: None,
+			prevtx_outpoints: new_hash_set(),
+			outputs: new_hash_map(),
+			tx_locktime,
+			feerate_sat_per_kw,
+		}
+	}
+
+	/// Records `tx_out` as the actual shared funding output, splitting its value
+	/// between the peers according to the previously agreed local contribution
+	/// (`expected_shared_funding_output.1`).
+	///
+	/// Errors with [`AbortReason::DuplicateFundingOutput`] if a shared output was
+	/// already set, or [`AbortReason::InvalidLowFundingOutputValue`] if the
+	/// intended local part exceeds the output's total value.
+	fn set_actual_new_funding_output(
+		&mut self, tx_out: TxOut,
+	) -> Result<SharedOwnedOutput, AbortReason> {
+		if self.actual_new_funding_output.is_some() {
+			return Err(AbortReason::DuplicateFundingOutput);
+		}
+		let value = tx_out.value.to_sat();
+		let local_owned = self.expected_shared_funding_output.1;
+		// Sanity check
+		if local_owned > value {
+			return Err(AbortReason::InvalidLowFundingOutputValue);
+		}
+		let shared_output = SharedOwnedOutput::new(tx_out, local_owned);
+		// Also returned to the caller, so clone before storing.
+		self.actual_new_funding_output = Some(shared_output.clone());
+		Ok(shared_output)
+	}
+
fn is_serial_id_valid_for_counterparty(&self, serial_id: &SerialId) -> bool {
is_serial_id_valid_for_counterparty(self.holder_is_initiator, serial_id)
}
fn remote_inputs_value(&self) -> u64 {
- self.inputs
- .iter()
- .filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id))
- .fold(0u64, |acc, (_, InteractiveTxInput { prev_output, .. })| {
- acc.saturating_add(prev_output.value.to_sat())
- })
+ self.inputs.iter().fold(0u64, |acc, (_, input)| acc.saturating_add(input.remote_value()))
}
fn remote_outputs_value(&self) -> u64 {
- self.outputs
- .iter()
- .filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id))
- .fold(0u64, |acc, (_, InteractiveTxOutput { tx_out, .. })| {
- acc.saturating_add(tx_out.value.to_sat())
- })
+ self.outputs.iter().fold(0u64, |acc, (_, output)| acc.saturating_add(output.remote_value()))
}
fn remote_inputs_weight(&self) -> Weight {
self.inputs
.iter()
.filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id))
- .fold(0u64, |weight, (_, InteractiveTxInput { prev_output, .. })| {
- weight.saturating_add(estimate_input_weight(prev_output).to_wu())
+ .fold(0u64, |weight, (_, input)| {
+ weight.saturating_add(estimate_input_weight(input.prev_output()).to_wu())
}),
)
}
self.outputs
.iter()
.filter(|(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id))
- .fold(0u64, |weight, (_, InteractiveTxOutput { tx_out, .. })| {
- weight.saturating_add(get_output_weight(&tx_out.script_pubkey).to_wu())
+ .fold(0u64, |weight, (_, output)| {
+ weight.saturating_add(get_output_weight(&output.script_pubkey()).to_wu())
}),
)
}
},
hash_map::Entry::Vacant(entry) => {
let prev_outpoint = OutPoint { txid, vout: msg.prevtx_out };
- entry.insert(InteractiveTxInput {
+ entry.insert(InteractiveTxInput::Remote(LocalOrRemoteInput {
serial_id: msg.serial_id,
input: TxIn {
previous_output: prev_outpoint,
..Default::default()
},
prev_output: prev_out,
- });
+ }));
self.prevtx_outpoints.insert(prev_outpoint);
Ok(())
},
// bitcoin supply.
let mut outputs_value: u64 = 0;
for output in self.outputs.iter() {
- outputs_value = outputs_value.saturating_add(output.1.tx_out.value.to_sat());
+ outputs_value = outputs_value.saturating_add(output.1.value());
}
if outputs_value.saturating_add(msg.sats) > TOTAL_BITCOIN_SUPPLY_SATOSHIS {
// The receiving node:
return Err(AbortReason::InvalidOutputScript);
}
+ let txout = TxOut { value: Amount::from_sat(msg.sats), script_pubkey: msg.script.clone() };
+ let is_shared = msg.script == self.expected_shared_funding_output.0;
+ let output = if is_shared {
+ // this is a shared funding output
+ let shared_output = self.set_actual_new_funding_output(txout)?;
+ InteractiveTxOutput {
+ serial_id: msg.serial_id,
+ added_by: AddingRole::Remote,
+ output: OutputOwned::Shared(shared_output),
+ }
+ } else {
+ InteractiveTxOutput {
+ serial_id: msg.serial_id,
+ added_by: AddingRole::Remote,
+ output: OutputOwned::Single(txout),
+ }
+ };
match self.outputs.entry(msg.serial_id) {
hash_map::Entry::Occupied(_) => {
// The receiving node:
Err(AbortReason::DuplicateSerialId)
},
hash_map::Entry::Vacant(entry) => {
- entry.insert(InteractiveTxOutput {
- serial_id: msg.serial_id,
- tx_out: TxOut {
- value: Amount::from_sat(msg.sats),
- script_pubkey: msg.script.clone(),
- },
- });
+ entry.insert(output);
Ok(())
},
}
fn sent_tx_add_input(&mut self, msg: &msgs::TxAddInput) -> Result<(), AbortReason> {
let tx = msg.prevtx.as_transaction();
- let input = TxIn {
+ let txin = TxIn {
previous_output: OutPoint { txid: tx.txid(), vout: msg.prevtx_out },
sequence: Sequence(msg.sequence),
..Default::default()
};
- let prev_output =
- tx.output.get(msg.prevtx_out as usize).ok_or(AbortReason::PrevTxOutInvalid)?.clone();
- if !self.prevtx_outpoints.insert(input.previous_output) {
+ if !self.prevtx_outpoints.insert(txin.previous_output.clone()) {
// We have added an input that already exists
return Err(AbortReason::PrevTxOutInvalid);
}
- self.inputs.insert(
- msg.serial_id,
- InteractiveTxInput { serial_id: msg.serial_id, input, prev_output },
- );
+ let vout = txin.previous_output.vout as usize;
+ let prev_output = tx.output.get(vout).ok_or(AbortReason::PrevTxOutInvalid)?.clone();
+ let input = InteractiveTxInput::Local(LocalOrRemoteInput {
+ serial_id: msg.serial_id,
+ input: txin,
+ prev_output,
+ });
+ self.inputs.insert(msg.serial_id, input);
Ok(())
}
fn sent_tx_add_output(&mut self, msg: &msgs::TxAddOutput) -> Result<(), AbortReason> {
- self.outputs.insert(
- msg.serial_id,
+ let txout = TxOut { value: Amount::from_sat(msg.sats), script_pubkey: msg.script.clone() };
+ let is_shared = msg.script == self.expected_shared_funding_output.0;
+ let output = if is_shared {
+ // this is a shared funding output
+ let shared_output = self.set_actual_new_funding_output(txout)?;
InteractiveTxOutput {
serial_id: msg.serial_id,
- tx_out: TxOut {
- value: Amount::from_sat(msg.sats),
- script_pubkey: msg.script.clone(),
- },
- },
- );
+ added_by: AddingRole::Local,
+ output: OutputOwned::Shared(shared_output),
+ }
+ } else {
+ InteractiveTxOutput {
+ serial_id: msg.serial_id,
+ added_by: AddingRole::Local,
+ output: OutputOwned::Single(txout),
+ }
+ };
+ self.outputs.insert(msg.serial_id, output);
Ok(())
}
return Err(AbortReason::ExceededNumberOfInputsOrOutputs);
}
+ if self.actual_new_funding_output.is_none() {
+ return Err(AbortReason::MissingFundingOutput);
+ }
+
// - the peer's paid feerate does not meet or exceed the agreed feerate (based on the minimum fee).
self.check_counterparty_fees(remote_inputs_value.saturating_sub(remote_outputs_value))?;
}
impl StateMachine {
- fn new(feerate_sat_per_kw: u32, is_initiator: bool, tx_locktime: AbsoluteLockTime) -> Self {
- let context = NegotiationContext {
+ fn new(
+ feerate_sat_per_kw: u32, is_initiator: bool, tx_locktime: AbsoluteLockTime,
+ expected_shared_funding_output: (ScriptBuf, u64),
+ ) -> Self {
+ let context = NegotiationContext::new(
+ is_initiator,
+ expected_shared_funding_output,
tx_locktime,
- holder_is_initiator: is_initiator,
- received_tx_add_input_count: 0,
- received_tx_add_output_count: 0,
- inputs: new_hash_map(),
- prevtx_outpoints: new_hash_set(),
- outputs: new_hash_map(),
feerate_sat_per_kw,
- };
+ );
if is_initiator {
Self::ReceivedChangeMsg(ReceivedChangeMsg(context))
} else {
]);
}
+/// Identifies which side of the negotiation added an input or output; used when
+/// attributing contributed values to the local or remote node.
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub enum AddingRole {
+	/// Added by the local node.
+	Local,
+	/// Added by the remote counterparty.
+	Remote,
+}
+
+/// Represents an input -- local or remote (both have the same fields)
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct LocalOrRemoteInput {
+	// Serial ID assigned during interactive construction; used for ordering.
+	serial_id: SerialId,
+	// The input as it will appear in the constructed transaction.
+	input: TxIn,
+	// The output being spent; used for value and weight accounting.
+	prev_output: TxOut,
+}
+
+/// An input tracked during interactive transaction construction, tagged by
+/// which side added it (both variants carry the same payload).
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum InteractiveTxInput {
+	/// Input added by the local node.
+	Local(LocalOrRemoteInput),
+	/// Input added by the remote node.
+	Remote(LocalOrRemoteInput),
+	// TODO(splicing) SharedInput should be added
+}
+
+/// An output whose value is jointly owned: `local_owned` satoshis belong to the
+/// local node and the remainder of `tx_out.value` to the remote node.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct SharedOwnedOutput {
+	// The shared output itself.
+	tx_out: TxOut,
+	// Satoshis of `tx_out.value` owned by the local node; the rest is remote-owned.
+	local_owned: u64,
+}
+
+impl SharedOwnedOutput {
+	/// Builds a shared output. Debug-asserts that the locally-owned part does not
+	/// exceed the total output value (callers are expected to validate this; see
+	/// `set_actual_new_funding_output`).
+	fn new(tx_out: TxOut, local_owned: u64) -> SharedOwnedOutput {
+		debug_assert!(
+			local_owned <= tx_out.value.to_sat(),
+			"SharedOwnedOutput: Inconsistent local_owned value {}, larger than output value {}",
+			local_owned,
+			tx_out.value
+		);
+		SharedOwnedOutput { tx_out, local_owned }
+	}
+
+	/// Value owned by the remote node: total minus the local part, saturating at 0.
+	fn remote_owned(&self) -> u64 {
+		self.tx_out.value.to_sat().saturating_sub(self.local_owned)
+	}
+}
+
+/// Represents an output, with information about
+/// its control -- exclusive by the adder or shared --, and
+/// its ownership -- value fully owned by the adder or jointly
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum OutputOwned {
+	/// Belongs to local node -- controlled exclusively and fully belonging to local node
+	/// NOTE(review): `Single` outputs can also be added by the remote side, in which case
+	/// `local_value`/`remote_value` attribute the full value to the remote node -- "local
+	/// node" here appears to mean "the adding node"; confirm.
+	Single(TxOut),
+	/// Output with shared control, but fully belonging to local node
+	SharedControlFullyOwned(TxOut),
+	/// Output with shared control and joint ownership
+	Shared(SharedOwnedOutput),
+}
+
+impl OutputOwned {
+	/// The underlying transaction output, regardless of ownership variant.
+	fn tx_out(&self) -> &TxOut {
+		match self {
+			OutputOwned::Single(tx_out) | OutputOwned::SharedControlFullyOwned(tx_out) => tx_out,
+			OutputOwned::Shared(output) => &output.tx_out,
+		}
+	}
+
+	/// Total value of the output, in satoshis.
+	fn value(&self) -> u64 {
+		self.tx_out().value.to_sat()
+	}
+
+	/// Whether the output is under shared control (either fully locally owned or
+	/// jointly owned); `Single` outputs are exclusively controlled by their adder.
+	fn is_shared(&self) -> bool {
+		match self {
+			OutputOwned::Single(_) => false,
+			OutputOwned::SharedControlFullyOwned(_) => true,
+			OutputOwned::Shared(_) => true,
+		}
+	}
+
+	/// Satoshis of this output attributed to the local node, given which side
+	/// added it: non-shared outputs belong entirely to their adder, while shared
+	/// outputs use the stored `local_owned` split.
+	fn local_value(&self, local_role: AddingRole) -> u64 {
+		match self {
+			OutputOwned::Single(tx_out) | OutputOwned::SharedControlFullyOwned(tx_out) => {
+				match local_role {
+					AddingRole::Local => tx_out.value.to_sat(),
+					AddingRole::Remote => 0,
+				}
+			},
+			OutputOwned::Shared(output) => output.local_owned,
+		}
+	}
+
+	/// Satoshis of this output attributed to the remote node; the complement of
+	/// [`Self::local_value`].
+	fn remote_value(&self, local_role: AddingRole) -> u64 {
+		match self {
+			OutputOwned::Single(tx_out) | OutputOwned::SharedControlFullyOwned(tx_out) => {
+				match local_role {
+					AddingRole::Local => 0,
+					AddingRole::Remote => tx_out.value.to_sat(),
+				}
+			},
+			OutputOwned::Shared(output) => output.remote_owned(),
+		}
+	}
+}
+
+/// An output tracked during interactive transaction construction, together with
+/// which side added it and its ownership classification.
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub struct InteractiveTxOutput {
+	// Serial ID assigned during interactive construction; used for ordering.
+	serial_id: SerialId,
+	// Which side added this output.
+	added_by: AddingRole,
+	// The output plus its control/ownership classification.
+	output: OutputOwned,
+}
+
+impl InteractiveTxOutput {
+	/// The underlying transaction output.
+	fn tx_out(&self) -> &TxOut {
+		self.output.tx_out()
+	}
+
+	/// Total value of the output, in satoshis.
+	fn value(&self) -> u64 {
+		self.tx_out().value.to_sat()
+	}
+
+	/// Satoshis of this output owned by the local node, taking into account
+	/// which side added it.
+	fn local_value(&self) -> u64 {
+		self.output.local_value(self.added_by)
+	}
+
+	/// Satoshis of this output owned by the remote node, taking into account
+	/// which side added it.
+	fn remote_value(&self) -> u64 {
+		self.output.remote_value(self.added_by)
+	}
+
+	/// The output's script pubkey (also used to recognize the shared funding output).
+	fn script_pubkey(&self) -> &ScriptBuf {
+		&self.output.tx_out().script_pubkey
+	}
+}
+
+impl InteractiveTxInput {
+	/// Serial ID assigned to this input during interactive construction.
+	pub fn serial_id(&self) -> SerialId {
+		match self {
+			InteractiveTxInput::Local(input) => input.serial_id,
+			InteractiveTxInput::Remote(input) => input.serial_id,
+		}
+	}
+
+	/// The input as it will appear in the constructed transaction.
+	pub fn txin(&self) -> &TxIn {
+		match self {
+			InteractiveTxInput::Local(input) => &input.input,
+			InteractiveTxInput::Remote(input) => &input.input,
+		}
+	}
+
+	/// The output being spent by this input.
+	pub fn prev_output(&self) -> &TxOut {
+		match self {
+			InteractiveTxInput::Local(input) => &input.prev_output,
+			InteractiveTxInput::Remote(input) => &input.prev_output,
+		}
+	}
+
+	/// Value of the spent output, in satoshis.
+	pub fn value(&self) -> u64 {
+		self.prev_output().value.to_sat()
+	}
+
+	/// Satoshis contributed by the local node: the full input value for
+	/// locally-added inputs, 0 for remote-added ones.
+	pub fn local_value(&self) -> u64 {
+		match self {
+			InteractiveTxInput::Local(input) => input.prev_output.value.to_sat(),
+			InteractiveTxInput::Remote(_input) => 0,
+		}
+	}
+
+	/// Satoshis contributed by the remote node: the full input value for
+	/// remote-added inputs, 0 for locally-added ones.
+	pub fn remote_value(&self) -> u64 {
+		match self {
+			InteractiveTxInput::Local(_input) => 0,
+			InteractiveTxInput::Remote(input) => input.prev_output.value.to_sat(),
+		}
+	}
+}
+
pub(crate) struct InteractiveTxConstructor {
state_machine: StateMachine,
channel_id: ChannelId,
inputs_to_contribute: Vec<(SerialId, TxIn, TransactionU16LenLimited)>,
- outputs_to_contribute: Vec<(SerialId, TxOut)>,
+ outputs_to_contribute: Vec<(SerialId, OutputOwned)>,
}
pub(crate) enum InteractiveTxMessageSend {
impl InteractiveTxConstructor {
/// Instantiates a new `InteractiveTxConstructor`.
///
+ /// `expected_remote_shared_funding_output`: In the case when the local node doesn't
+ /// add a shared output, but it expects a shared output to be added by the remote node,
+ /// it has to specify the script pubkey, used to determine the shared output,
+ /// and its (local) contribution from the shared output:
+ /// 0 when the whole value belongs to the remote node, or
+ /// positive if owned also by local.
+	/// Note: The local value cannot be larger than the actual shared output.
+ ///
/// A tuple is returned containing the newly instantiate `InteractiveTxConstructor` and optionally
/// an initial wrapped `Tx_` message which the holder needs to send to the counterparty.
pub fn new<ES: Deref>(
entropy_source: &ES, channel_id: ChannelId, feerate_sat_per_kw: u32, is_initiator: bool,
funding_tx_locktime: AbsoluteLockTime,
inputs_to_contribute: Vec<(TxIn, TransactionU16LenLimited)>,
- outputs_to_contribute: Vec<TxOut>,
- ) -> (Self, Option<InteractiveTxMessageSend>)
+ outputs_to_contribute: Vec<OutputOwned>,
+ expected_remote_shared_funding_output: Option<(ScriptBuf, u64)>,
+ ) -> Result<(Self, Option<InteractiveTxMessageSend>), AbortReason>
where
ES::Target: EntropySource,
{
- let state_machine =
- StateMachine::new(feerate_sat_per_kw, is_initiator, funding_tx_locktime);
- let mut inputs_to_contribute: Vec<(SerialId, TxIn, TransactionU16LenLimited)> =
- inputs_to_contribute
+ // Sanity check: There can be at most one shared output, local-added or remote-added
+ let mut expected_shared_funding_output: Option<(ScriptBuf, u64)> = None;
+ for output in &outputs_to_contribute {
+ let new_output = match output {
+ OutputOwned::Single(_tx_out) => None,
+ OutputOwned::SharedControlFullyOwned(tx_out) => {
+ Some((tx_out.script_pubkey.clone(), tx_out.value.to_sat()))
+ },
+ OutputOwned::Shared(output) => {
+ // Sanity check
+ if output.local_owned > output.tx_out.value.to_sat() {
+ return Err(AbortReason::InvalidLowFundingOutputValue);
+ }
+ Some((output.tx_out.script_pubkey.clone(), output.local_owned))
+ },
+ };
+ if new_output.is_some() {
+ if expected_shared_funding_output.is_some()
+ || expected_remote_shared_funding_output.is_some()
+ {
+ // more than one local-added shared output or
+ // one local-added and one remote-expected shared output
+ return Err(AbortReason::DuplicateFundingOutput);
+ }
+ expected_shared_funding_output = new_output;
+ }
+ }
+ if let Some(expected_remote_shared_funding_output) = expected_remote_shared_funding_output {
+ expected_shared_funding_output = Some(expected_remote_shared_funding_output);
+ }
+ if let Some(expected_shared_funding_output) = expected_shared_funding_output {
+ let state_machine = StateMachine::new(
+ feerate_sat_per_kw,
+ is_initiator,
+ funding_tx_locktime,
+ expected_shared_funding_output,
+ );
+ let mut inputs_to_contribute: Vec<(SerialId, TxIn, TransactionU16LenLimited)> =
+ inputs_to_contribute
+ .into_iter()
+ .map(|(input, tx)| {
+ let serial_id = generate_holder_serial_id(entropy_source, is_initiator);
+ (serial_id, input, tx)
+ })
+ .collect();
+ // We'll sort by the randomly generated serial IDs, effectively shuffling the order of the inputs
+ // as the user passed them to us to avoid leaking any potential categorization of transactions
+ // before we pass any of the inputs to the counterparty.
+ inputs_to_contribute.sort_unstable_by_key(|(serial_id, _, _)| *serial_id);
+ let mut outputs_to_contribute: Vec<_> = outputs_to_contribute
.into_iter()
- .map(|(input, tx)| {
+ .map(|output| {
let serial_id = generate_holder_serial_id(entropy_source, is_initiator);
- (serial_id, input, tx)
+ (serial_id, output)
})
.collect();
- // We'll sort by the randomly generated serial IDs, effectively shuffling the order of the inputs
- // as the user passed them to us to avoid leaking any potential categorization of transactions
- // before we pass any of the inputs to the counterparty.
- inputs_to_contribute.sort_unstable_by_key(|(serial_id, _, _)| *serial_id);
- let mut outputs_to_contribute: Vec<(SerialId, TxOut)> = outputs_to_contribute
- .into_iter()
- .map(|output| {
- let serial_id = generate_holder_serial_id(entropy_source, is_initiator);
- (serial_id, output)
- })
- .collect();
- // In the same manner and for the same rationale as the inputs above, we'll shuffle the outputs.
- outputs_to_contribute.sort_unstable_by_key(|(serial_id, _)| *serial_id);
- let mut constructor =
- Self { state_machine, channel_id, inputs_to_contribute, outputs_to_contribute };
- let message_send = if is_initiator {
- match constructor.maybe_send_message() {
- Ok(msg_send) => Some(msg_send),
- Err(_) => {
- debug_assert!(
- false,
- "We should always be able to start our state machine successfully"
- );
- None
- },
- }
+ // In the same manner and for the same rationale as the inputs above, we'll shuffle the outputs.
+ outputs_to_contribute.sort_unstable_by_key(|(serial_id, _)| *serial_id);
+ let mut constructor =
+ Self { state_machine, channel_id, inputs_to_contribute, outputs_to_contribute };
+ let message_send = if is_initiator {
+ match constructor.maybe_send_message() {
+ Ok(msg_send) => Some(msg_send),
+ Err(_) => {
+ debug_assert!(
+ false,
+ "We should always be able to start our state machine successfully"
+ );
+ None
+ },
+ }
+ } else {
+ None
+ };
+ Ok((constructor, message_send))
} else {
- None
- };
- (constructor, message_send)
+ Err(AbortReason::MissingFundingOutput)
+ }
}
fn maybe_send_message(&mut self) -> Result<InteractiveTxMessageSend, AbortReason> {
let msg = msgs::TxAddOutput {
channel_id: self.channel_id,
serial_id,
- sats: output.value.to_sat(),
- script: output.script_pubkey,
+ sats: output.tx_out().value.to_sat(),
+ script: output.tx_out().script_pubkey.clone(),
};
do_state_transition!(self, sent_tx_add_output, &msg)?;
Ok(InteractiveTxMessageSend::TxAddOutput(msg))
use crate::sign::EntropySource;
use crate::util::atomic_counter::AtomicCounter;
use crate::util::ser::TransactionU16LenLimited;
+ use bitcoin::absolute::LockTime as AbsoluteLockTime;
use bitcoin::amount::Amount;
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::script::Builder;
use bitcoin::secp256k1::{Keypair, Secp256k1};
use bitcoin::transaction::Version;
use bitcoin::{
- absolute::LockTime as AbsoluteLockTime, OutPoint, Sequence, Transaction, TxIn, TxOut,
+ OutPoint, PubkeyHash, ScriptBuf, Sequence, Transaction, TxIn, TxOut, WPubkeyHash,
};
- use bitcoin::{PubkeyHash, ScriptBuf, WPubkeyHash, WScriptHash};
use core::ops::Deref;
use super::{
- get_output_weight, P2TR_INPUT_WEIGHT_LOWER_BOUND, P2WPKH_INPUT_WEIGHT_LOWER_BOUND,
+ get_output_weight, AddingRole, OutputOwned, SharedOwnedOutput,
+ P2TR_INPUT_WEIGHT_LOWER_BOUND, P2WPKH_INPUT_WEIGHT_LOWER_BOUND,
P2WSH_INPUT_WEIGHT_LOWER_BOUND, TX_COMMON_FIELDS_WEIGHT,
};
struct TestSession {
description: &'static str,
inputs_a: Vec<(TxIn, TransactionU16LenLimited)>,
- outputs_a: Vec<TxOut>,
+ outputs_a: Vec<OutputOwned>,
inputs_b: Vec<(TxIn, TransactionU16LenLimited)>,
- outputs_b: Vec<TxOut>,
+ outputs_b: Vec<OutputOwned>,
expect_error: Option<(AbortReason, ErrorCulprit)>,
+	/// Node A adds no shared output, but expects the peer to add one with the specified script pubkey and local contribution
+ a_expected_remote_shared_output: Option<(ScriptBuf, u64)>,
+	/// Node B adds no shared output, but expects the peer to add one with the specified script pubkey and local contribution
+ b_expected_remote_shared_output: Option<(ScriptBuf, u64)>,
}
fn do_test_interactive_tx_constructor(session: TestSession) {
let channel_id = ChannelId(entropy_source.get_secure_random_bytes());
let tx_locktime = AbsoluteLockTime::from_height(1337).unwrap();
- let (mut constructor_a, first_message_a) = InteractiveTxConstructor::new(
+ // funding output sanity check
+ let shared_outputs_by_a: Vec<_> =
+ session.outputs_a.iter().filter(|o| o.is_shared()).collect();
+ if shared_outputs_by_a.len() > 1 {
+ println!("Test warning: Expected at most one shared output. NodeA");
+ }
+ let shared_output_by_a = if shared_outputs_by_a.len() >= 1 {
+ Some(shared_outputs_by_a[0].value())
+ } else {
+ None
+ };
+ let shared_outputs_by_b: Vec<_> =
+ session.outputs_b.iter().filter(|o| o.is_shared()).collect();
+ if shared_outputs_by_b.len() > 1 {
+ println!("Test warning: Expected at most one shared output. NodeB");
+ }
+ let shared_output_by_b = if shared_outputs_by_b.len() >= 1 {
+ Some(shared_outputs_by_b[0].value())
+ } else {
+ None
+ };
+ if session.a_expected_remote_shared_output.is_some()
+ || session.b_expected_remote_shared_output.is_some()
+ {
+ let expected_by_a = if let Some(a_expected_remote_shared_output) =
+ &session.a_expected_remote_shared_output
+ {
+ a_expected_remote_shared_output.1
+ } else {
+ if shared_outputs_by_a.len() >= 1 {
+ shared_outputs_by_a[0].local_value(AddingRole::Local)
+ } else {
+ 0
+ }
+ };
+ let expected_by_b = if let Some(b_expected_remote_shared_output) =
+ &session.b_expected_remote_shared_output
+ {
+ b_expected_remote_shared_output.1
+ } else {
+ if shared_outputs_by_b.len() >= 1 {
+ shared_outputs_by_b[0].local_value(AddingRole::Local)
+ } else {
+ 0
+ }
+ };
+
+ let expected_sum = expected_by_a + expected_by_b;
+ let actual_shared_output =
+ shared_output_by_a.unwrap_or(shared_output_by_b.unwrap_or(0));
+ if expected_sum != actual_shared_output {
+ println!("Test warning: Sum of expected shared output values does not match actual shared output value, {} {} {} {} {} {}", expected_sum, actual_shared_output, expected_by_a, expected_by_b, shared_output_by_a.unwrap_or(0), shared_output_by_b.unwrap_or(0));
+ }
+ }
+
+ let (mut constructor_a, first_message_a) = match InteractiveTxConstructor::new(
entropy_source,
channel_id,
TEST_FEERATE_SATS_PER_KW,
true,
tx_locktime,
session.inputs_a,
- session.outputs_a,
- );
- let (mut constructor_b, first_message_b) = InteractiveTxConstructor::new(
+ session.outputs_a.iter().map(|o| o.clone()).collect(),
+ session.a_expected_remote_shared_output,
+ ) {
+ Ok(r) => r,
+ Err(abort_reason) => {
+ assert_eq!(
+ Some((abort_reason, ErrorCulprit::NodeA)),
+ session.expect_error,
+ "Test: {}",
+ session.description
+ );
+ return;
+ },
+ };
+ let (mut constructor_b, first_message_b) = match InteractiveTxConstructor::new(
entropy_source,
channel_id,
TEST_FEERATE_SATS_PER_KW,
false,
tx_locktime,
session.inputs_b,
- session.outputs_b,
- );
+ session.outputs_b.iter().map(|o| o.clone()).collect(),
+ session.b_expected_remote_shared_output,
+ ) {
+ Ok(r) => r,
+ Err(abort_reason) => {
+ assert_eq!(
+ Some((abort_reason, ErrorCulprit::NodeB)),
+ session.expect_error,
+ "Test: {}",
+ session.description
+ );
+ return;
+ },
+ };
let handle_message_send =
|msg: InteractiveTxMessageSend, for_constructor: &mut InteractiveTxConstructor| {
"Test: {}",
session.description
);
- assert!(message_send_b.is_none());
+ assert!(message_send_b.is_none(), "Test: {}", session.description);
return;
},
}
"Test: {}",
session.description
);
- assert!(message_send_a.is_none());
+ assert!(message_send_a.is_none(), "Test: {}", session.description);
return;
},
}
assert!(message_send_a.is_none());
assert!(message_send_b.is_none());
assert_eq!(final_tx_a.unwrap().into_unsigned_tx(), final_tx_b.unwrap().into_unsigned_tx());
- assert!(session.expect_error.is_none(), "Test: {}", session.description);
+ assert!(
+ session.expect_error.is_none(),
+ "Missing expected error {:?}, Test: {}",
+ session.expect_error,
+ session.description,
+ );
}
#[derive(Debug, Clone, Copy)]
enum TestOutput {
P2WPKH(u64),
+ /// P2WSH, but with the specific script used for the funding output
P2WSH(u64),
P2TR(u64),
// Non-witness type to test rejection.
fn generate_txout(output: &TestOutput) -> TxOut {
let secp_ctx = Secp256k1::new();
let (value, script_pubkey) = match output {
- TestOutput::P2WPKH(value) => {
- (*value, ScriptBuf::new_p2wpkh(&WPubkeyHash::from_slice(&[1; 20]).unwrap()))
- },
- TestOutput::P2WSH(value) => {
- (*value, ScriptBuf::new_p2wsh(&WScriptHash::from_slice(&[2; 32]).unwrap()))
- },
+ TestOutput::P2WPKH(value) => (*value, generate_p2wpkh_script_pubkey()),
+ TestOutput::P2WSH(value) => (*value, generate_funding_script_pubkey()),
TestOutput::P2TR(value) => (
*value,
ScriptBuf::new_p2tr(
ScriptBuf::new_p2wpkh(&WPubkeyHash::from_slice(&[1; 20]).unwrap())
}
- fn generate_outputs(outputs: &[TestOutput]) -> Vec<TxOut> {
- outputs.iter().map(generate_txout).collect()
+ fn generate_funding_script_pubkey() -> ScriptBuf {
+ Builder::new().push_int(33).into_script().to_p2wsh()
+ }
+
+ fn generate_output_nonfunding_one(output: &TestOutput) -> OutputOwned {
+ OutputOwned::Single(generate_txout(output))
+ }
+
+ fn generate_outputs(outputs: &[TestOutput]) -> Vec<OutputOwned> {
+ outputs.iter().map(|o| generate_output_nonfunding_one(o)).collect()
+ }
+
+ /// Generate a single output that is the funding output
+ fn generate_output(output: &TestOutput) -> Vec<OutputOwned> {
+ vec![OutputOwned::SharedControlFullyOwned(generate_txout(output))]
+ }
+
+ /// Generate a single P2WSH output that is the funding output
+ fn generate_funding_output(value: u64) -> Vec<OutputOwned> {
+ generate_output(&TestOutput::P2WSH(value))
+ }
+
+ /// Generate a single P2WSH output with shared contribution that is the funding output
+ fn generate_shared_funding_output_one(value: u64, local_value: u64) -> OutputOwned {
+ OutputOwned::Shared(SharedOwnedOutput {
+ tx_out: generate_txout(&TestOutput::P2WSH(value)),
+ local_owned: local_value,
+ })
+ }
+
+ /// Generate a single P2WSH output with shared contribution that is the funding output
+ fn generate_shared_funding_output(value: u64, local_value: u64) -> Vec<OutputOwned> {
+ vec![generate_shared_funding_output_one(value, local_value)]
}
fn generate_fixed_number_of_inputs(count: u16) -> Vec<(TxIn, TransactionU16LenLimited)> {
inputs
}
- fn generate_fixed_number_of_outputs(count: u16) -> Vec<TxOut> {
+ fn generate_fixed_number_of_outputs(count: u16) -> Vec<OutputOwned> {
// Set a constant value for each TxOut
generate_outputs(&vec![TestOutput::P2WPKH(1_000_000); count as usize])
}
Builder::new().push_opcode(opcodes::OP_TRUE).into_script().to_p2sh()
}
- fn generate_non_witness_output(value: u64) -> TxOut {
- TxOut { value: Amount::from_sat(value), script_pubkey: generate_p2sh_script_pubkey() }
+ fn generate_non_witness_output(value: u64) -> OutputOwned {
+ OutputOwned::Single(TxOut {
+ value: Amount::from_sat(value),
+ script_pubkey: generate_p2sh_script_pubkey(),
+ })
}
#[test]
outputs_a: vec![],
inputs_b: vec![],
outputs_b: vec![],
- expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)),
+ expect_error: Some((AbortReason::MissingFundingOutput, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: None,
});
do_test_interactive_tx_constructor(TestSession {
description: "Single contribution, no initiator inputs",
inputs_a: vec![],
- outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]),
+ outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)),
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::OutputsValueExceedsInputsValue, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)),
});
do_test_interactive_tx_constructor(TestSession {
description: "Single contribution, no initiator outputs",
outputs_a: vec![],
inputs_b: vec![],
outputs_b: vec![],
- expect_error: None,
+ expect_error: Some((AbortReason::MissingFundingOutput, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: None,
});
do_test_interactive_tx_constructor(TestSession {
description: "Single contribution, no fees",
inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]),
- outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]),
+ outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)),
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)),
});
let p2wpkh_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, P2WPKH_INPUT_WEIGHT_LOWER_BOUND);
let outputs_fee = fee_for_weight(
);
let tx_common_fields_fee =
fee_for_weight(TEST_FEERATE_SATS_PER_KW, TX_COMMON_FIELDS_WEIGHT);
+
+ let amount_adjusted_with_p2wpkh_fee =
+ 1_000_000 - p2wpkh_fee - outputs_fee - tx_common_fields_fee;
do_test_interactive_tx_constructor(TestSession {
description: "Single contribution, with P2WPKH input, insufficient fees",
inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]),
- outputs_a: generate_outputs(&[TestOutput::P2WPKH(
- 1_000_000 - p2wpkh_fee - outputs_fee - tx_common_fields_fee + 1, /* makes fees insuffcient for initiator */
- )]),
+ outputs_a: generate_output(&TestOutput::P2WPKH(
+ amount_adjusted_with_p2wpkh_fee + 1, /* makes fees insufficient for initiator */
+ )),
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)),
});
do_test_interactive_tx_constructor(TestSession {
description: "Single contribution with P2WPKH input, sufficient fees",
inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]),
- outputs_a: generate_outputs(&[TestOutput::P2WPKH(
- 1_000_000 - p2wpkh_fee - outputs_fee - tx_common_fields_fee,
- )]),
+ outputs_a: generate_output(&TestOutput::P2WPKH(amount_adjusted_with_p2wpkh_fee)),
inputs_b: vec![],
outputs_b: vec![],
expect_error: None,
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)),
});
let p2wsh_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, P2WSH_INPUT_WEIGHT_LOWER_BOUND);
+ let amount_adjusted_with_p2wsh_fee =
+ 1_000_000 - p2wsh_fee - outputs_fee - tx_common_fields_fee;
do_test_interactive_tx_constructor(TestSession {
description: "Single contribution, with P2WSH input, insufficient fees",
inputs_a: generate_inputs(&[TestOutput::P2WSH(1_000_000)]),
- outputs_a: generate_outputs(&[TestOutput::P2WPKH(
- 1_000_000 - p2wsh_fee - outputs_fee - tx_common_fields_fee + 1, /* makes fees insuffcient for initiator */
- )]),
+ outputs_a: generate_output(&TestOutput::P2WPKH(
+ amount_adjusted_with_p2wsh_fee + 1, /* makes fees insufficient for initiator */
+ )),
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)),
});
do_test_interactive_tx_constructor(TestSession {
description: "Single contribution with P2WSH input, sufficient fees",
inputs_a: generate_inputs(&[TestOutput::P2WSH(1_000_000)]),
- outputs_a: generate_outputs(&[TestOutput::P2WPKH(
- 1_000_000 - p2wsh_fee - outputs_fee - tx_common_fields_fee,
- )]),
+ outputs_a: generate_output(&TestOutput::P2WPKH(amount_adjusted_with_p2wsh_fee)),
inputs_b: vec![],
outputs_b: vec![],
expect_error: None,
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)),
});
let p2tr_fee = fee_for_weight(TEST_FEERATE_SATS_PER_KW, P2TR_INPUT_WEIGHT_LOWER_BOUND);
+ let amount_adjusted_with_p2tr_fee =
+ 1_000_000 - p2tr_fee - outputs_fee - tx_common_fields_fee;
do_test_interactive_tx_constructor(TestSession {
description: "Single contribution, with P2TR input, insufficient fees",
inputs_a: generate_inputs(&[TestOutput::P2TR(1_000_000)]),
- outputs_a: generate_outputs(&[TestOutput::P2WPKH(
- 1_000_000 - p2tr_fee - outputs_fee - tx_common_fields_fee + 1, /* makes fees insuffcient for initiator */
- )]),
+ outputs_a: generate_output(&TestOutput::P2WPKH(
+ amount_adjusted_with_p2tr_fee + 1, /* makes fees insufficient for initiator */
+ )),
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)),
});
do_test_interactive_tx_constructor(TestSession {
description: "Single contribution with P2TR input, sufficient fees",
inputs_a: generate_inputs(&[TestOutput::P2TR(1_000_000)]),
- outputs_a: generate_outputs(&[TestOutput::P2WPKH(
- 1_000_000 - p2tr_fee - outputs_fee - tx_common_fields_fee,
- )]),
+ outputs_a: generate_output(&TestOutput::P2WPKH(amount_adjusted_with_p2tr_fee)),
inputs_b: vec![],
outputs_b: vec![],
expect_error: None,
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)),
});
do_test_interactive_tx_constructor(TestSession {
description: "Initiator contributes sufficient fees, but non-initiator does not",
inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]),
outputs_a: vec![],
inputs_b: generate_inputs(&[TestOutput::P2WPKH(100_000)]),
- outputs_b: generate_outputs(&[TestOutput::P2WPKH(100_000)]),
+ outputs_b: generate_output(&TestOutput::P2WPKH(100_000)),
expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeB)),
+ a_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)),
+ b_expected_remote_shared_output: None,
});
do_test_interactive_tx_constructor(TestSession {
description: "Multi-input-output contributions from both sides",
inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000); 2]),
- outputs_a: generate_outputs(&[
- TestOutput::P2WPKH(1_000_000),
- TestOutput::P2WPKH(200_000),
- ]),
+ outputs_a: vec![
+ generate_shared_funding_output_one(1_000_000, 200_000),
+ generate_output_nonfunding_one(&TestOutput::P2WPKH(200_000)),
+ ],
inputs_b: generate_inputs(&[
TestOutput::P2WPKH(1_000_000),
TestOutput::P2WPKH(500_000),
]),
- outputs_b: generate_outputs(&[
- TestOutput::P2WPKH(1_000_000),
- TestOutput::P2WPKH(400_000),
- ]),
+ outputs_b: vec![generate_output_nonfunding_one(&TestOutput::P2WPKH(400_000))],
expect_error: None,
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 800_000)),
});
do_test_interactive_tx_constructor(TestSession {
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
});
let tx =
do_test_interactive_tx_constructor(TestSession {
description: "Invalid input sequence from initiator",
inputs_a: vec![(invalid_sequence_input, tx.clone())],
- outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]),
+ outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)),
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::IncorrectInputSequenceValue, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)),
});
let duplicate_input = TxIn {
previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 },
do_test_interactive_tx_constructor(TestSession {
description: "Duplicate prevout from initiator",
inputs_a: vec![(duplicate_input.clone(), tx.clone()), (duplicate_input, tx.clone())],
- outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]),
+ outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)),
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeB)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)),
});
+ // Non-initiator uses same prevout as initiator.
let duplicate_input = TxIn {
previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 },
sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
do_test_interactive_tx_constructor(TestSession {
description: "Non-initiator uses same prevout as initiator",
inputs_a: vec![(duplicate_input.clone(), tx.clone())],
- outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]),
+ outputs_a: generate_shared_funding_output(1_000_000, 905_000),
inputs_b: vec![(duplicate_input.clone(), tx.clone())],
outputs_b: vec![],
expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 95_000)),
+ });
+ let duplicate_input = TxIn {
+ previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 },
+ sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
+ ..Default::default()
+ };
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Non-initiator uses same prevout as initiator",
+ inputs_a: vec![(duplicate_input.clone(), tx.clone())],
+ outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)),
+ inputs_b: vec![(duplicate_input.clone(), tx.clone())],
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_p2wpkh_script_pubkey(), 0)),
});
do_test_interactive_tx_constructor(TestSession {
description: "Initiator sends too many TxAddInputs",
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::ReceivedTooManyTxAddInputs, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
});
do_test_interactive_tx_constructor_with_entropy_source(
TestSession {
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::DuplicateSerialId, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
},
&DuplicateEntropySource,
);
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::ReceivedTooManyTxAddOutputs, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
});
do_test_interactive_tx_constructor(TestSession {
description: "Initiator sends an output below dust value",
inputs_a: vec![],
- outputs_a: generate_outputs(&[TestOutput::P2WSH(
+ outputs_a: generate_funding_output(
generate_p2wsh_script_pubkey().dust_value().to_sat() - 1,
- )]),
+ ),
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::BelowDustLimit, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
});
do_test_interactive_tx_constructor(TestSession {
description: "Initiator sends an output above maximum sats allowed",
inputs_a: vec![],
- outputs_a: generate_outputs(&[TestOutput::P2WPKH(TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1)]),
+ outputs_a: generate_output(&TestOutput::P2WPKH(TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1)),
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::ExceededMaximumSatsAllowed, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
});
do_test_interactive_tx_constructor(TestSession {
description: "Initiator sends an output without a witness program",
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::InvalidOutputScript, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
});
do_test_interactive_tx_constructor_with_entropy_source(
TestSession {
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::DuplicateSerialId, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
},
&DuplicateEntropySource,
);
do_test_interactive_tx_constructor(TestSession {
description: "Peer contributed more output value than inputs",
inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000)]),
- outputs_a: generate_outputs(&[TestOutput::P2WPKH(1_000_000)]),
+ outputs_a: generate_output(&TestOutput::P2WPKH(1_000_000)),
inputs_b: vec![],
outputs_b: vec![],
expect_error: Some((AbortReason::OutputsValueExceedsInputsValue, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
});
do_test_interactive_tx_constructor(TestSession {
AbortReason::ExceededNumberOfInputsOrOutputs,
ErrorCulprit::Indeterminate,
)),
+ a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
});
do_test_interactive_tx_constructor(TestSession {
description: "Peer contributed more than allowed number of outputs",
AbortReason::ExceededNumberOfInputsOrOutputs,
ErrorCulprit::Indeterminate,
)),
+ a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
+ });
+
+ // Adding multiple outputs to the funding output pubkey is an error
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Adding two outputs to the funding output pubkey",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(1_000_000)]),
+ outputs_a: generate_funding_output(100_000),
+ inputs_b: generate_inputs(&[TestOutput::P2WPKH(1_001_000)]),
+ outputs_b: generate_funding_output(100_000),
+ expect_error: Some((AbortReason::DuplicateFundingOutput, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: None,
+ });
+
+ // We add the funding output, but we contribute a little
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Funding output by us, small contribution",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(12_000)]),
+ outputs_a: generate_shared_funding_output(1_000_000, 10_000),
+ inputs_b: generate_inputs(&[TestOutput::P2WPKH(992_000)]),
+ outputs_b: vec![],
+ expect_error: None,
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 990_000)),
+ });
+
+ // They add the funding output, and we contribute a little
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Funding output by them, small contribution",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(12_000)]),
+ outputs_a: vec![],
+ inputs_b: generate_inputs(&[TestOutput::P2WPKH(992_000)]),
+ outputs_b: generate_shared_funding_output(1_000_000, 990_000),
+ expect_error: None,
+ a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 10_000)),
+ b_expected_remote_shared_output: None,
+ });
+
+ // We add the funding output, and we contribute most
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Funding output by us, large contribution",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(992_000)]),
+ outputs_a: generate_shared_funding_output(1_000_000, 990_000),
+ inputs_b: generate_inputs(&[TestOutput::P2WPKH(12_000)]),
+ outputs_b: vec![],
+ expect_error: None,
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 10_000)),
+ });
+
+ // They add the funding output, but we contribute most
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Funding output by them, large contribution",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(992_000)]),
+ outputs_a: vec![],
+ inputs_b: generate_inputs(&[TestOutput::P2WPKH(12_000)]),
+ outputs_b: generate_shared_funding_output(1_000_000, 10_000),
+ expect_error: None,
+ a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 990_000)),
+ b_expected_remote_shared_output: None,
+ });
+
+ // During a splice-out, with peer providing more output value than input value
+ // but still pays enough fees due to their to_remote_value_satoshis portion in
+ // the shared input.
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Splice out with sufficient initiator balance",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000), TestOutput::P2WPKH(50_000)]),
+ outputs_a: generate_funding_output(120_000),
+ inputs_b: generate_inputs(&[TestOutput::P2WPKH(50_000)]),
+ outputs_b: vec![],
+ expect_error: None,
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
+ });
+
+ // During a splice-out, with peer providing more output value than input value
+ // and the to_remote_value_satoshis portion in
+ // the shared input cannot cover fees
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Splice out with insufficient initiator balance",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000), TestOutput::P2WPKH(15_000)]),
+ outputs_a: generate_funding_output(120_000),
+ inputs_b: generate_inputs(&[TestOutput::P2WPKH(85_000)]),
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::OutputsValueExceedsInputsValue, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 0)),
+ });
+
+ // The actual funding output value is lower than the intended local contribution by the same node
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Splice in, invalid intended local contribution",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000), TestOutput::P2WPKH(15_000)]),
+ outputs_a: generate_shared_funding_output(100_000, 120_000), // local value is higher than the output value
+ inputs_b: generate_inputs(&[TestOutput::P2WPKH(85_000)]),
+ outputs_b: vec![],
+ expect_error: Some((AbortReason::InvalidLowFundingOutputValue, ErrorCulprit::NodeA)),
+ a_expected_remote_shared_output: None,
+ b_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 20_000)),
+ });
+
+ // The actual funding output value is lower than the intended local contribution of the other node
+ do_test_interactive_tx_constructor(TestSession {
+ description: "Splice in, invalid intended local contribution",
+ inputs_a: generate_inputs(&[TestOutput::P2WPKH(100_000), TestOutput::P2WPKH(15_000)]),
+ outputs_a: vec![],
+ inputs_b: generate_inputs(&[TestOutput::P2WPKH(85_000)]),
+ outputs_b: generate_funding_output(100_000),
+ // The error is caused by NodeA; it occurs when NodeA prepares the message to be sent to NodeB, which is why it shows up here as NodeB
+ expect_error: Some((AbortReason::InvalidLowFundingOutputValue, ErrorCulprit::NodeB)),
+ a_expected_remote_shared_output: Some((generate_funding_script_pubkey(), 120_000)), // this is higher than the actual output value
+ b_expected_remote_shared_output: None,
});
}
ParsedOnionMessageContents::Offers(offers_message) => match offers_message {
OffersMessage::InvoiceRequest(invoice_request) => (invoice_request, reply_path.unwrap()),
OffersMessage::Invoice(invoice) => panic!("Unexpected invoice: {:?}", invoice),
+ #[cfg(async_payments)]
+ OffersMessage::StaticInvoice(invoice) => panic!("Unexpected static invoice: {:?}", invoice),
OffersMessage::InvoiceError(error) => panic!("Unexpected invoice_error: {:?}", error),
},
+ #[cfg(async_payments)]
+ ParsedOnionMessageContents::AsyncPayments(message) => panic!("Unexpected async payments message: {:?}", message),
ParsedOnionMessageContents::Custom(message) => panic!("Unexpected custom message: {:?}", message),
},
Ok(PeeledOnion::Forward(_, _)) => panic!("Unexpected onion message forward"),
ParsedOnionMessageContents::Offers(offers_message) => match offers_message {
OffersMessage::InvoiceRequest(invoice_request) => panic!("Unexpected invoice_request: {:?}", invoice_request),
OffersMessage::Invoice(invoice) => invoice,
+ #[cfg(async_payments)]
+ OffersMessage::StaticInvoice(invoice) => panic!("Unexpected static invoice: {:?}", invoice),
OffersMessage::InvoiceError(error) => panic!("Unexpected invoice_error: {:?}", error),
},
+ #[cfg(async_payments)]
+ ParsedOnionMessageContents::AsyncPayments(message) => panic!("Unexpected async payments message: {:?}", message),
ParsedOnionMessageContents::Custom(message) => panic!("Unexpected custom message: {:?}", message),
},
Ok(PeeledOnion::Forward(_, _)) => panic!("Unexpected onion message forward"),
ParsedOnionMessageContents::Offers(offers_message) => match offers_message {
OffersMessage::InvoiceRequest(invoice_request) => panic!("Unexpected invoice_request: {:?}", invoice_request),
OffersMessage::Invoice(invoice) => panic!("Unexpected invoice: {:?}", invoice),
+ #[cfg(async_payments)]
+ OffersMessage::StaticInvoice(invoice) => panic!("Unexpected invoice: {:?}", invoice),
OffersMessage::InvoiceError(error) => error,
},
+ #[cfg(async_payments)]
+ ParsedOnionMessageContents::AsyncPayments(message) => panic!("Unexpected async payments message: {:?}", message),
ParsedOnionMessageContents::Custom(message) => panic!("Unexpected custom message: {:?}", message),
},
Ok(PeeledOnion::Forward(_, _)) => panic!("Unexpected onion message forward"),
);
}
-/// Fails creating an offer when a blinded path cannot be created without exposing the node's id.
+/// Checks that an offer can be created using an unannounced node as a blinded path's introduction
+/// node. This is only preferred if there are no other options which may indicate either the offer
+/// is intended for the unannounced node or that the node is actually announced (e.g., an LSP) but
+/// the recipient doesn't have a network graph.
#[test]
-fn fails_creating_offer_without_blinded_paths() {
+fn creates_offer_with_blinded_path_using_unannounced_introduction_node() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
- match nodes[0].node.create_offer_builder(None) {
- Ok(_) => panic!("Expected error"),
- Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+ let alice = &nodes[0];
+ let alice_id = alice.node.get_our_node_id();
+ let bob = &nodes[1];
+ let bob_id = bob.node.get_our_node_id();
+
+ let offer = alice.node
+ .create_offer_builder(None).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+ assert_ne!(offer.signing_pubkey(), Some(alice_id));
+ assert!(!offer.paths().is_empty());
+ for path in offer.paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
}
+
+ let payment_id = PaymentId([1; 32]);
+ bob.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None).unwrap();
+ expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap();
+ alice.onion_messenger.handle_onion_message(&bob_id, &onion_message);
+
+ let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message);
+ let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
+ offer_id: offer.id(),
+ invoice_request: InvoiceRequestFields {
+ payer_id: invoice_request.payer_id(),
+ quantity: None,
+ payer_note_truncated: None,
+ },
+ });
+ assert_ne!(invoice_request.payer_id(), bob_id);
+ assert_eq!(reply_path.introduction_node, IntroductionNode::NodeId(alice_id));
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let invoice = extract_invoice(bob, &onion_message);
+ assert_ne!(invoice.signing_pubkey(), alice_id);
+ assert!(!invoice.payment_paths().is_empty());
+ for (_, path) in invoice.payment_paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
+ }
+
+ route_bolt12_payment(bob, &[alice], &invoice);
+ expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(bob, &[alice], payment_context);
+ expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id);
}
-/// Fails creating a refund when a blinded path cannot be created without exposing the node's id.
+/// Checks that a refund can be created using an unannounced node as a blinded path's introduction
+/// node. This is only preferred if there are no other options which may indicate either the refund
+/// is intended for the unannounced node or that the node is actually announced (e.g., an LSP) but
+/// the sender doesn't have a network graph.
#[test]
-fn fails_creating_refund_without_blinded_paths() {
+fn creates_refund_with_blinded_path_using_unannounced_introduction_node() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ let alice = &nodes[0];
+ let alice_id = alice.node.get_our_node_id();
+ let bob = &nodes[1];
+ let bob_id = bob.node.get_our_node_id();
+
let absolute_expiry = Duration::from_secs(u64::MAX);
let payment_id = PaymentId([1; 32]);
-
- match nodes[0].node.create_refund_builder(
- 10_000, absolute_expiry, payment_id, Retry::Attempts(0), None
- ) {
- Ok(_) => panic!("Expected error"),
- Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+ let refund = bob.node
+ .create_refund_builder(10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None)
+ .unwrap()
+ .build().unwrap();
+ assert_ne!(refund.payer_id(), bob_id);
+ assert!(!refund.paths().is_empty());
+ for path in refund.paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(alice_id));
}
+ expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id);
- assert!(nodes[0].node.list_recent_payments().is_empty());
+ let expected_invoice = alice.node.request_refund_payment(&refund).unwrap();
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+
+ let invoice = extract_invoice(bob, &onion_message);
+ assert_eq!(invoice, expected_invoice);
+ assert_ne!(invoice.signing_pubkey(), alice_id);
+ assert!(!invoice.payment_paths().is_empty());
+ for (_, path) in invoice.payment_paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
+ }
}
/// Fails creating or paying an offer when a blinded path cannot be created because no peers are
}
}
-/// Fails creating an invoice request when a blinded reply path cannot be created without exposing
-/// the node's id.
+/// Fails creating an invoice request when a blinded reply path cannot be created.
#[test]
fn fails_creating_invoice_request_without_blinded_reply_path() {
let chanmon_cfgs = create_chanmon_cfgs(6);
let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
- disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, charlie, &nodes[4], &nodes[5]]);
let offer = alice.node
.create_offer_builder(None).unwrap()
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{self, Secp256k1, SecretKey};
-use crate::sign::{EntropySource, NodeSigner, Recipient};
+use crate::blinded_path::{IntroductionNode, NodeIdLookUp};
+use crate::blinded_path::payment::advance_path_by_one;
use crate::events::{self, PaymentFailureReason};
use crate::ln::types::{PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::channel_state::ChannelDetails;
use crate::ln::onion_utils::{DecodedOnionFailure, HTLCFailReason};
use crate::offers::invoice::Bolt12Invoice;
use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, PaymentParameters, Route, RouteParameters, Router};
+use crate::sign::{EntropySource, NodeSigner, Recipient};
use crate::util::errors::APIError;
use crate::util::logger::Logger;
use crate::util::time::Time;
keysend_preimage: Option<PaymentPreimage>,
custom_tlvs: Vec<(u64, Vec<u8>)>,
pending_amt_msat: u64,
- /// Used to track the fee paid. Only present if the payment was serialized on 0.0.103+.
+ /// Used to track the fee paid. Present iff the payment was serialized on 0.0.103+.
pending_fee_msat: Option<u64>,
/// The total payment amount across all paths, used to verify that a retry is not overpaying.
total_msat: u64,
}
}
- pub(super) fn send_payment_for_bolt12_invoice<R: Deref, ES: Deref, NS: Deref, IH, SP, L: Deref>(
+ pub(super) fn send_payment_for_bolt12_invoice<
+ R: Deref, ES: Deref, NS: Deref, NL: Deref, IH, SP, L: Deref
+ >(
&self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R,
first_hops: Vec<ChannelDetails>, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS,
- best_block_height: u32, logger: &L,
+ node_id_lookup: &NL, secp_ctx: &Secp256k1<secp256k1::All>, best_block_height: u32,
+ logger: &L,
pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
send_payment_along_path: SP,
) -> Result<(), Bolt12PaymentError>
R::Target: Router,
ES::Target: EntropySource,
NS::Target: NodeSigner,
+ NL::Target: NodeIdLookUp,
L::Target: Logger,
IH: Fn() -> InFlightHtlcs,
SP: Fn(SendAlongPathArgs) -> Result<(), APIError>,
hash_map::Entry::Vacant(_) => return Err(Bolt12PaymentError::UnexpectedInvoice),
};
- let pay_params = PaymentParameters::from_bolt12_invoice(&invoice);
+ let mut payment_params = PaymentParameters::from_bolt12_invoice(&invoice);
+
+ // Advance any blinded path where the introduction node is our node.
+ if let Ok(our_node_id) = node_signer.get_node_id(Recipient::Node) {
+ for (_, path) in payment_params.payee.blinded_route_hints_mut().iter_mut() {
+ let introduction_node_id = match path.introduction_node {
+ IntroductionNode::NodeId(pubkey) => pubkey,
+ IntroductionNode::DirectedShortChannelId(direction, scid) => {
+ match node_id_lookup.next_node_id(scid) {
+ Some(next_node_id) => *direction.select_pubkey(&our_node_id, &next_node_id),
+ None => continue,
+ }
+ },
+ };
+ if introduction_node_id == our_node_id {
+ let _ = advance_path_by_one(path, node_signer, node_id_lookup, secp_ctx);
+ }
+ }
+ }
+
let amount_msat = invoice.amount_msats();
- let mut route_params = RouteParameters::from_payment_params_and_value(pay_params, amount_msat);
+ let mut route_params = RouteParameters::from_payment_params_and_value(
+ payment_params, amount_msat
+ );
if let Some(max_fee_msat) = max_total_routing_fee_msat {
route_params.max_total_routing_fee_msat = Some(max_fee_msat);
}
use core::time::Duration;
+ use crate::blinded_path::EmptyNodeIdLookUp;
use crate::events::{Event, PathFailure, PaymentFailureReason};
use crate::ln::types::PaymentHash;
use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
+ let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let pending_events = Mutex::new(VecDeque::new());
assert_eq!(
outbound_payments.send_payment_for_bolt12_invoice(
&invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
- &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+ &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events,
+ |_| panic!()
),
Ok(()),
);
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
+ let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let pending_events = Mutex::new(VecDeque::new());
assert_eq!(
outbound_payments.send_payment_for_bolt12_invoice(
&invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
- &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+ &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events,
+ |_| panic!()
),
Ok(()),
);
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
+ let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let pending_events = Mutex::new(VecDeque::new());
assert_eq!(
outbound_payments.send_payment_for_bolt12_invoice(
&invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
- &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+ &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events,
+ |_| panic!()
),
Err(Bolt12PaymentError::UnexpectedInvoice),
);
assert_eq!(
outbound_payments.send_payment_for_bolt12_invoice(
&invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
- &&keys_manager, 0, &&logger, &pending_events, |_| Ok(())
+ &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events,
+ |_| Ok(())
),
Ok(()),
);
assert_eq!(
outbound_payments.send_payment_for_bolt12_invoice(
&invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
- &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+ &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events,
+ |_| panic!()
),
Err(Bolt12PaymentError::DuplicateInvoice),
);
fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let max_in_flight_percent = 10;
let mut intercept_forwards_config = test_default_channel_config();
intercept_forwards_config.accept_intercept_htlcs = true;
+ intercept_forwards_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent;
let mut underpay_config = test_default_channel_config();
underpay_config.channel_config.accept_underpaying_htlcs = true;
+ underpay_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent;
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(underpay_config)]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let amt_msat = 900_000;
+
let mut chan_ids = Vec::new();
for _ in 0..num_mpp_parts {
- let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000, 0);
- let channel_id = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 2_000_000, 0).0.channel_id;
+ // We choose the channel size so that there can be at most one part pending on each channel.
+ let channel_size = amt_msat / 1000 / num_mpp_parts as u64 * 100 / max_in_flight_percent as u64 + 100;
+ let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_size, 0);
+ let channel_id = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, channel_size, 0).0.channel_id;
chan_ids.push(channel_id);
}
// Send the initial payment.
- let amt_msat = 900_000;
let skimmed_fee_msat = 20;
let mut route_hints = Vec::new();
for _ in 0..num_mpp_parts {
// Create a new channel between C and D as A will refuse to retry on the existing one because
// it just failed.
- let chan_id_cd_2 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).2;
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
// Now retry the failed HTLC.
nodes[0].node.process_pending_htlc_forwards();
expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors(&nodes[2], 1);
let cs_forward = SendEvent::from_node(&nodes[2]);
+ let cd_channel_used = cs_forward.msgs[0].channel_id;
nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &cs_forward.msgs[0]);
commitment_signed_dance!(nodes[3], nodes[2], cs_forward.commitment_msg, false, true);
nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &ds_fail.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true);
expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(),
- &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd_2 }]);
+ &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: cd_channel_used }]);
} else {
expect_pending_htlcs_forwardable!(nodes[3]);
expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat);
_ => panic!()
}
}
+
+#[test]
+fn test_non_strict_forwarding() {
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let mut config = test_default_channel_config();
+ config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config), Some(config), Some(config)]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // Create a routing node with two outbound channels, each of which can forward 2 payments of
+ // the given value.
+ let payment_value = 1_500_000;
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
+ let (chan_update_1, _, channel_id_1, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 4_950, 0);
+ let (chan_update_2, _, channel_id_2, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 5_000, 0);
+
+ // Create a route once.
+ let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
+ .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap();
+ let route_params = RouteParameters::from_payment_params_and_value(payment_params, payment_value);
+ let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap();
+
+ // Send 4 payments over the same route.
+ for i in 0..4 {
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(payment_value), None);
+ nodes[0].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ let mut send_event = SendEvent::from_event(msg_events.remove(0));
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ send_event = SendEvent::from_event(msg_events.remove(0));
+ // The HTLC will be forwarded over the most appropriate channel with the corresponding peer,
+ // applying non-strict forwarding.
+ // The channel with the least amount of outbound liquidity will be used to maximize the
+ // probability of being able to successfully forward a subsequent HTLC.
+ assert_eq!(send_event.msgs[0].channel_id, if i < 2 {
+ channel_id_1
+ } else {
+ channel_id_2
+ });
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
+ commitment_signed_dance!(nodes[2], nodes[1], &send_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ let events = nodes[2].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ assert!(matches!(events[0], Event::PaymentClaimable { .. }));
+
+ claim_payment_along_route(
+ ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage)
+ );
+ }
+
+ // Send a 5th payment which will fail.
+ let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(payment_value), None);
+ nodes[0].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ let mut send_event = SendEvent::from_event(msg_events.remove(0));
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ let routed_scid = route.paths[0].hops[1].short_channel_id;
+ let routed_channel_id = match routed_scid {
+ scid if scid == chan_update_1.contents.short_channel_id => channel_id_1,
+ scid if scid == chan_update_2.contents.short_channel_id => channel_id_2,
+ _ => panic!("Unexpected short channel id in route"),
+ };
+ // The failure to forward will refer to the channel given in the onion.
+ expect_pending_htlcs_forwardable_conditions(nodes[1].node.get_and_clear_pending_events(),
+ &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: routed_channel_id }]);
+
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().blamed_scid(routed_scid));
+}
use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE};
use crate::ln::wire;
use crate::ln::wire::{Encode, Type};
+use crate::onion_message::async_payments::{AsyncPaymentsMessageHandler, HeldHtlcAvailable, ReleaseHeldHtlc};
use crate::onion_message::messenger::{CustomOnionMessageHandler, PendingOnionMessage, Responder, ResponseInstruction};
use crate::onion_message::offers::{OffersMessage, OffersMessageHandler};
use crate::onion_message::packet::OnionMessageContents;
ResponseInstruction::NoResponse
}
}
+impl AsyncPaymentsMessageHandler for IgnoringMessageHandler {
+ fn held_htlc_available(
+ &self, _message: HeldHtlcAvailable, _responder: Option<Responder>,
+ ) -> ResponseInstruction<ReleaseHeldHtlc> {
+ ResponseInstruction::NoResponse
+ }
+ fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {}
+}
impl CustomOnionMessageHandler for IgnoringMessageHandler {
type CustomMessage = Infallible;
fn handle_custom_message(&self, _message: Self::CustomMessage, _responder: Option<Responder>) -> ResponseInstruction<Self::CustomMessage> {
mod payer;
pub mod refund;
pub(crate) mod signer;
-#[allow(unused)]
-pub(crate) mod static_invoice;
+#[cfg(async_payments)]
+pub mod static_invoice;
#[cfg(test)]
pub(crate) mod test_utils;
self.contents.expects_quantity()
}
+ #[cfg(async_payments)]
pub(super) fn verify<T: secp256k1::Signing>(
&self, key: &ExpandedKey, secp_ctx: &Secp256k1<T>
) -> Result<(OfferId, Option<Keypair>), ()> {
use crate::ln::inbound_payment::ExpandedKey;
use crate::ln::msgs::DecodeError;
use crate::offers::invoice::{
- check_invoice_signing_pubkey, construct_payment_paths, filter_fallbacks, BlindedPathIter,
- BlindedPayInfo, BlindedPayInfoIter, FallbackAddress, InvoiceTlvStream, InvoiceTlvStreamRef,
+ check_invoice_signing_pubkey, construct_payment_paths, filter_fallbacks, BlindedPayInfo,
+ FallbackAddress, InvoiceTlvStream, InvoiceTlvStreamRef,
};
use crate::offers::invoice_macros::{invoice_accessors_common, invoice_builder_methods_common};
use crate::offers::merkle::{
Amount, Offer, OfferContents, OfferTlvStream, OfferTlvStreamRef, Quantity,
};
use crate::offers::parse::{Bolt12ParseError, Bolt12SemanticError, ParsedMessage};
-use crate::util::ser::{
- HighZeroBytesDroppedBigSize, Iterable, SeekReadable, WithoutLength, Writeable, Writer,
-};
+use crate::util::ser::{Iterable, SeekReadable, WithoutLength, Writeable, Writer};
use crate::util::string::PrintableString;
use bitcoin::address::Address;
use bitcoin::blockdata::constants::ChainHash;
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+//! Message handling for async payments.
+
+use crate::io;
+use crate::ln::msgs::DecodeError;
+use crate::onion_message::messenger::PendingOnionMessage;
+use crate::onion_message::messenger::{Responder, ResponseInstruction};
+use crate::onion_message::packet::OnionMessageContents;
+use crate::prelude::*;
+use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
+
+// TLV record types for the `onionmsg_tlv` TLV stream as defined in BOLT 4.
+const HELD_HTLC_AVAILABLE_TLV_TYPE: u64 = 72;
+const RELEASE_HELD_HTLC_TLV_TYPE: u64 = 74;
+
+/// A handler for an [`OnionMessage`] containing an async payments message as its payload.
+///
+/// [`OnionMessage`]: crate::ln::msgs::OnionMessage
+pub trait AsyncPaymentsMessageHandler {
+ /// Handle a [`HeldHtlcAvailable`] message. A [`ReleaseHeldHtlc`] should be returned to release
+ /// the held funds.
+ fn held_htlc_available(
+ &self, message: HeldHtlcAvailable, responder: Option<Responder>,
+ ) -> ResponseInstruction<ReleaseHeldHtlc>;
+
+ /// Handle a [`ReleaseHeldHtlc`] message. If authentication of the message succeeds, an HTLC
+ /// should be released to the corresponding payee.
+ fn release_held_htlc(&self, message: ReleaseHeldHtlc);
+
+ /// Release any [`AsyncPaymentsMessage`]s that need to be sent.
+ ///
+ /// Typically, this is used for messages initiating an async payment flow rather than in response
+ /// to another message.
+ #[cfg(not(c_bindings))]
+ fn release_pending_messages(&self) -> Vec<PendingOnionMessage<AsyncPaymentsMessage>> {
+ vec![]
+ }
+
+	/// Release any [`AsyncPaymentsMessage`]s that need to be sent.
+	///
+	/// Typically, this is used for messages initiating an async payment flow rather than in
+	/// response to another message.
+ #[cfg(c_bindings)]
+ fn release_pending_messages(
+ &self,
+ ) -> Vec<(
+ AsyncPaymentsMessage,
+ crate::onion_message::messenger::Destination,
+ Option<crate::blinded_path::BlindedPath>,
+ )> {
+ vec![]
+ }
+}
+
+/// Possible async payment messages sent and received via an [`OnionMessage`].
+///
+/// [`OnionMessage`]: crate::ln::msgs::OnionMessage
+#[derive(Clone, Debug)]
+pub enum AsyncPaymentsMessage {
+ /// An HTLC is being held upstream for the often-offline recipient, to be released via
+ /// [`ReleaseHeldHtlc`].
+ HeldHtlcAvailable(HeldHtlcAvailable),
+
+ /// Releases the HTLC corresponding to an inbound [`HeldHtlcAvailable`] message.
+ ReleaseHeldHtlc(ReleaseHeldHtlc),
+}
+
+/// An HTLC destined for the recipient of this message is being held upstream. The reply path
+/// accompanying this onion message should be used to send a [`ReleaseHeldHtlc`] response, which
+/// will cause the upstream HTLC to be released.
+#[derive(Clone, Debug)]
+pub struct HeldHtlcAvailable {
+ /// The secret that will be used by the recipient of this message to release the held HTLC.
+ pub payment_release_secret: [u8; 32],
+}
+
+/// Releases the HTLC corresponding to an inbound [`HeldHtlcAvailable`] message.
+#[derive(Clone, Debug)]
+pub struct ReleaseHeldHtlc {
+ /// Used to release the HTLC held upstream if it matches the corresponding
+ /// [`HeldHtlcAvailable::payment_release_secret`].
+ pub payment_release_secret: [u8; 32],
+}
+
+impl OnionMessageContents for ReleaseHeldHtlc {
+ fn tlv_type(&self) -> u64 {
+ RELEASE_HELD_HTLC_TLV_TYPE
+ }
+ fn msg_type(&self) -> &'static str {
+ "Release Held HTLC"
+ }
+}
+
+impl_writeable_tlv_based!(HeldHtlcAvailable, {
+ (0, payment_release_secret, required),
+});
+
+impl_writeable_tlv_based!(ReleaseHeldHtlc, {
+ (0, payment_release_secret, required),
+});
+
+impl AsyncPaymentsMessage {
+ /// Returns whether `tlv_type` corresponds to a TLV record for async payment messages.
+ pub fn is_known_type(tlv_type: u64) -> bool {
+ match tlv_type {
+ HELD_HTLC_AVAILABLE_TLV_TYPE | RELEASE_HELD_HTLC_TLV_TYPE => true,
+ _ => false,
+ }
+ }
+}
+
+impl OnionMessageContents for AsyncPaymentsMessage {
+ fn tlv_type(&self) -> u64 {
+ match self {
+ Self::HeldHtlcAvailable(_) => HELD_HTLC_AVAILABLE_TLV_TYPE,
+ Self::ReleaseHeldHtlc(msg) => msg.tlv_type(),
+ }
+ }
+ fn msg_type(&self) -> &'static str {
+ match &self {
+ Self::HeldHtlcAvailable(_) => "Held HTLC Available",
+ Self::ReleaseHeldHtlc(msg) => msg.msg_type(),
+ }
+ }
+}
+
+impl Writeable for AsyncPaymentsMessage {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ match self {
+ Self::HeldHtlcAvailable(message) => message.write(w),
+ Self::ReleaseHeldHtlc(message) => message.write(w),
+ }
+ }
+}
+
+impl ReadableArgs<u64> for AsyncPaymentsMessage {
+ fn read<R: io::Read>(r: &mut R, tlv_type: u64) -> Result<Self, DecodeError> {
+ match tlv_type {
+ HELD_HTLC_AVAILABLE_TLV_TYPE => Ok(Self::HeldHtlcAvailable(Readable::read(r)?)),
+ RELEASE_HELD_HTLC_TLV_TYPE => Ok(Self::ReleaseHeldHtlc(Readable::read(r)?)),
+ _ => Err(DecodeError::InvalidValue),
+ }
+ }
+}
use crate::sign::{NodeSigner, Recipient};
use crate::util::ser::{FixedLengthReader, LengthReadable, Writeable, Writer};
use crate::util::test_utils;
+use super::async_payments::{AsyncPaymentsMessageHandler, HeldHtlcAvailable, ReleaseHeldHtlc};
use super::messenger::{CustomOnionMessageHandler, DefaultMessageRouter, Destination, OnionMessagePath, OnionMessenger, PendingOnionMessage, Responder, ResponseInstruction, SendError, SendSuccess};
use super::offers::{OffersMessage, OffersMessageHandler};
use super::packet::{OnionMessageContents, Packet};
Arc<test_utils::TestKeysInterface>
>>,
Arc<TestOffersMessageHandler>,
+ Arc<TestAsyncPaymentsMessageHandler>,
Arc<TestCustomMessageHandler>
>,
custom_message_handler: Arc<TestCustomMessageHandler>,
}
}
+struct TestAsyncPaymentsMessageHandler {}
+
+impl AsyncPaymentsMessageHandler for TestAsyncPaymentsMessageHandler {
+ fn held_htlc_available(
+ &self, _message: HeldHtlcAvailable, _responder: Option<Responder>,
+ ) -> ResponseInstruction<ReleaseHeldHtlc> {
+ ResponseInstruction::NoResponse
+ }
+ fn release_held_htlc(&self, _message: ReleaseHeldHtlc) {}
+}
+
#[derive(Clone, Debug, PartialEq)]
enum TestCustomMessage {
Ping,
DefaultMessageRouter::new(network_graph.clone(), entropy_source.clone())
);
let offers_message_handler = Arc::new(TestOffersMessageHandler {});
+ let async_payments_message_handler = Arc::new(TestAsyncPaymentsMessageHandler {});
let custom_message_handler = Arc::new(TestCustomMessageHandler::new());
let messenger = if cfg.intercept_offline_peer_oms {
OnionMessenger::new_with_offline_peer_interception(
entropy_source.clone(), node_signer.clone(), logger.clone(),
node_id_lookup, message_router, offers_message_handler,
- custom_message_handler.clone()
+ async_payments_message_handler, custom_message_handler.clone()
)
} else {
OnionMessenger::new(
entropy_source.clone(), node_signer.clone(), logger.clone(),
node_id_lookup, message_router, offers_message_handler,
- custom_message_handler.clone()
+ async_payments_message_handler, custom_message_handler.clone()
)
};
nodes.push(MessengerNode {
let path_id = Some([2; 32]);
let reply_path = BlindedPath::new_for_message(&[], bob.node_id, &*bob.entropy_source, &secp_ctx).unwrap();
- // Alice tries to asynchronously respond to Bob, but fails because the nodes are unannounced.
- // Therefore, the reply_path cannot be used for the response.
+	// Alice tries to asynchronously respond to Bob, but fails because the nodes are unannounced and
+	// disconnected. Thus, a reply path could not be created for the response.
+ disconnect_peers(alice, bob);
let responder = Responder::new(reply_path, path_id);
alice.custom_message_handler.expect_message_and_response(message.clone());
let response_instruction = alice.custom_message_handler.handle_custom_message(message, Some(responder));
use crate::ln::msgs::{self, OnionMessage, OnionMessageHandler, SocketAddress};
use crate::ln::onion_utils;
use crate::routing::gossip::{NetworkGraph, NodeId, ReadOnlyNetworkGraph};
+use super::async_payments::AsyncPaymentsMessageHandler;
+#[cfg(async_payments)]
+use super::async_payments::AsyncPaymentsMessage;
use super::packet::OnionMessageContents;
use super::packet::ParsedOnionMessageContents;
use super::offers::OffersMessageHandler;
type OffersMessageHandler: OffersMessageHandler + ?Sized;
/// A type that may be dereferenced to [`Self::OffersMessageHandler`]
type OMH: Deref<Target = Self::OffersMessageHandler>;
+ /// A type implementing [`AsyncPaymentsMessageHandler`]
+ type AsyncPaymentsMessageHandler: AsyncPaymentsMessageHandler + ?Sized;
+ /// A type that may be dereferenced to [`Self::AsyncPaymentsMessageHandler`]
+ type APH: Deref<Target = Self::AsyncPaymentsMessageHandler>;
/// A type implementing [`CustomOnionMessageHandler`]
type CustomOnionMessageHandler: CustomOnionMessageHandler + ?Sized;
/// A type that may be dereferenced to [`Self::CustomOnionMessageHandler`]
type CMH: Deref<Target = Self::CustomOnionMessageHandler>;
/// Returns a reference to the actual [`OnionMessenger`] object.
- fn get_om(&self) -> &OnionMessenger<Self::ES, Self::NS, Self::L, Self::NL, Self::MR, Self::OMH, Self::CMH>;
+ fn get_om(&self) -> &OnionMessenger<Self::ES, Self::NS, Self::L, Self::NL, Self::MR, Self::OMH, Self::APH, Self::CMH>;
}
-impl<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, CMH: Deref> AOnionMessenger
-for OnionMessenger<ES, NS, L, NL, MR, OMH, CMH> where
+impl<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, APH: Deref, CMH: Deref> AOnionMessenger
+for OnionMessenger<ES, NS, L, NL, MR, OMH, APH, CMH> where
ES::Target: EntropySource,
NS::Target: NodeSigner,
L::Target: Logger,
NL::Target: NodeIdLookUp,
MR::Target: MessageRouter,
OMH::Target: OffersMessageHandler,
+	APH::Target: AsyncPaymentsMessageHandler,
CMH::Target: CustomOnionMessageHandler,
{
type EntropySource = ES::Target;
type MR = MR;
type OffersMessageHandler = OMH::Target;
type OMH = OMH;
+ type AsyncPaymentsMessageHandler = APH::Target;
+ type APH = APH;
type CustomOnionMessageHandler = CMH::Target;
type CMH = CMH;
- fn get_om(&self) -> &OnionMessenger<ES, NS, L, NL, MR, OMH, CMH> { self }
+ fn get_om(&self) -> &OnionMessenger<ES, NS, L, NL, MR, OMH, APH, CMH> { self }
}
/// A sender, receiver and forwarder of [`OnionMessage`]s.
/// # let message_router = Arc::new(FakeMessageRouter {});
/// # let custom_message_handler = IgnoringMessageHandler {};
/// # let offers_message_handler = IgnoringMessageHandler {};
+/// # let async_payments_message_handler = IgnoringMessageHandler {};
/// // Create the onion messenger. This must use the same `keys_manager` as is passed to your
/// // ChannelManager.
/// let onion_messenger = OnionMessenger::new(
/// &keys_manager, &keys_manager, logger, &node_id_lookup, message_router,
-/// &offers_message_handler, &custom_message_handler
+/// &offers_message_handler, &async_payments_message_handler, &custom_message_handler
/// );
/// # #[derive(Debug)]
///
/// [`InvoiceRequest`]: crate::offers::invoice_request::InvoiceRequest
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
-pub struct OnionMessenger<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, CMH: Deref>
-where
+pub struct OnionMessenger<
+ ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, APH: Deref, CMH: Deref
+> where
ES::Target: EntropySource,
NS::Target: NodeSigner,
L::Target: Logger,
NL::Target: NodeIdLookUp,
MR::Target: MessageRouter,
OMH::Target: OffersMessageHandler,
+ APH::Target: AsyncPaymentsMessageHandler,
CMH::Target: CustomOnionMessageHandler,
{
entropy_source: ES,
node_id_lookup: NL,
message_router: MR,
offers_handler: OMH,
+ #[allow(unused)]
+ async_payments_handler: APH,
custom_handler: CMH,
intercept_messages_for_offline_peers: bool,
pending_events: Mutex<PendingEvents>,
}
fn create_blinded_paths_from_iter<
- I: Iterator<Item = ForwardNode>,
+ I: ExactSizeIterator<Item = ForwardNode>,
T: secp256k1::Signing + secp256k1::Verification
>(
&self, recipient: PublicKey, peers: I, secp_ctx: &Secp256k1<T>, compact_paths: bool
let is_recipient_announced =
network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient));
+ let has_one_peer = peers.len() == 1;
let mut peer_info = peers
- // Limit to peers with announced channels
+ // Limit to peers with announced channels unless the recipient is unannounced.
.filter_map(|peer|
network_graph
.node(&NodeId::from_pubkey(&peer.node_id))
- .filter(|info| info.channels.len() >= MIN_PEER_CHANNELS)
+ .filter(|info|
+ !is_recipient_announced || info.channels.len() >= MIN_PEER_CHANNELS
+ )
.map(|info| (peer, info.is_tor_only(), info.channels.len()))
+ // Allow messages directly with the only peer when unannounced.
+ .or_else(|| (!is_recipient_announced && has_one_peer)
+ .then(|| (peer, false, 0))
+ )
)
// Exclude Tor-only nodes when the recipient is announced.
.filter(|(_, is_tor_only, _)| !(*is_tor_only && is_recipient_announced))
}
}
-impl<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, CMH: Deref>
-OnionMessenger<ES, NS, L, NL, MR, OMH, CMH>
+impl<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, APH: Deref, CMH: Deref>
+OnionMessenger<ES, NS, L, NL, MR, OMH, APH, CMH>
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
NL::Target: NodeIdLookUp,
MR::Target: MessageRouter,
OMH::Target: OffersMessageHandler,
+ APH::Target: AsyncPaymentsMessageHandler,
CMH::Target: CustomOnionMessageHandler,
{
/// Constructs a new `OnionMessenger` to send, forward, and delegate received onion messages to
/// their respective handlers.
pub fn new(
entropy_source: ES, node_signer: NS, logger: L, node_id_lookup: NL, message_router: MR,
- offers_handler: OMH, custom_handler: CMH
+ offers_handler: OMH, async_payments_handler: APH, custom_handler: CMH
) -> Self {
Self::new_inner(
entropy_source, node_signer, logger, node_id_lookup, message_router,
- offers_handler, custom_handler, false
+ offers_handler, async_payments_handler, custom_handler, false
)
}
/// peers.
pub fn new_with_offline_peer_interception(
entropy_source: ES, node_signer: NS, logger: L, node_id_lookup: NL,
- message_router: MR, offers_handler: OMH, custom_handler: CMH
+ message_router: MR, offers_handler: OMH, async_payments_handler: APH, custom_handler: CMH
) -> Self {
Self::new_inner(
entropy_source, node_signer, logger, node_id_lookup, message_router,
- offers_handler, custom_handler, true
+ offers_handler, async_payments_handler, custom_handler, true
)
}
fn new_inner(
entropy_source: ES, node_signer: NS, logger: L, node_id_lookup: NL,
- message_router: MR, offers_handler: OMH, custom_handler: CMH,
+ message_router: MR, offers_handler: OMH, async_payments_handler: APH, custom_handler: CMH,
intercept_messages_for_offline_peers: bool
) -> Self {
let mut secp_ctx = Secp256k1::new();
node_id_lookup,
message_router,
offers_handler,
+ async_payments_handler,
custom_handler,
intercept_messages_for_offline_peers,
pending_events: Mutex::new(PendingEvents {
false
}
-impl<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, CMH: Deref> EventsProvider
-for OnionMessenger<ES, NS, L, NL, MR, OMH, CMH>
+impl<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, APH: Deref, CMH: Deref> EventsProvider
+for OnionMessenger<ES, NS, L, NL, MR, OMH, APH, CMH>
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
NL::Target: NodeIdLookUp,
MR::Target: MessageRouter,
OMH::Target: OffersMessageHandler,
+ APH::Target: AsyncPaymentsMessageHandler,
CMH::Target: CustomOnionMessageHandler,
{
fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
}
}
-impl<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, CMH: Deref> OnionMessageHandler
-for OnionMessenger<ES, NS, L, NL, MR, OMH, CMH>
+impl<ES: Deref, NS: Deref, L: Deref, NL: Deref, MR: Deref, OMH: Deref, APH: Deref, CMH: Deref> OnionMessageHandler
+for OnionMessenger<ES, NS, L, NL, MR, OMH, APH, CMH>
where
ES::Target: EntropySource,
NS::Target: NodeSigner,
NL::Target: NodeIdLookUp,
MR::Target: MessageRouter,
OMH::Target: OffersMessageHandler,
+ APH::Target: AsyncPaymentsMessageHandler,
CMH::Target: CustomOnionMessageHandler,
{
fn handle_onion_message(&self, peer_node_id: &PublicKey, msg: &OnionMessage) {
"Received an onion message with path_id {:02x?} and {} reply_path: {:?}",
path_id, if reply_path.is_some() { "a" } else { "no" }, message);
+ let responder = reply_path.map(
+ |reply_path| Responder::new(reply_path, path_id)
+ );
match message {
ParsedOnionMessageContents::Offers(msg) => {
- let responder = reply_path.map(
- |reply_path| Responder::new(reply_path, path_id)
- );
let response_instructions = self.offers_handler.handle_message(msg, responder);
let _ = self.handle_onion_message_response(response_instructions);
},
- ParsedOnionMessageContents::Custom(msg) => {
- let responder = reply_path.map(
- |reply_path| Responder::new(reply_path, path_id)
+ #[cfg(async_payments)]
+ ParsedOnionMessageContents::AsyncPayments(AsyncPaymentsMessage::HeldHtlcAvailable(msg)) => {
+ let response_instructions = self.async_payments_handler.held_htlc_available(
+ msg, responder
);
+ let _ = self.handle_onion_message_response(response_instructions);
+ },
+ #[cfg(async_payments)]
+ ParsedOnionMessageContents::AsyncPayments(AsyncPaymentsMessage::ReleaseHeldHtlc(msg)) => {
+ self.async_payments_handler.release_held_htlc(msg);
+ },
+ ParsedOnionMessageContents::Custom(msg) => {
let response_instructions = self.custom_handler.handle_custom_message(msg, responder);
let _ = self.handle_onion_message_response(response_instructions);
},
Arc<SimpleArcChannelManager<M, T, F, L>>,
Arc<DefaultMessageRouter<Arc<NetworkGraph<Arc<L>>>, Arc<L>, Arc<KeysManager>>>,
Arc<SimpleArcChannelManager<M, T, F, L>>,
+ Arc<SimpleArcChannelManager<M, T, F, L>>,
IgnoringMessageHandler
>;
&'i SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L>,
&'j DefaultMessageRouter<&'g NetworkGraph<&'b L>, &'b L, &'a KeysManager>,
&'i SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L>,
+ &'i SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L>,
IgnoringMessageHandler
>;
//! [blinded paths]: crate::blinded_path::BlindedPath
//! [`OnionMessenger`]: self::messenger::OnionMessenger
+pub mod async_payments;
pub mod messenger;
pub mod offers;
pub mod packet;
use crate::offers::invoice_request::InvoiceRequest;
use crate::offers::invoice::Bolt12Invoice;
use crate::offers::parse::Bolt12ParseError;
+#[cfg(async_payments)]
+use crate::offers::static_invoice::StaticInvoice;
use crate::onion_message::packet::OnionMessageContents;
use crate::util::logger::Logger;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
const INVOICE_REQUEST_TLV_TYPE: u64 = 64;
const INVOICE_TLV_TYPE: u64 = 66;
const INVOICE_ERROR_TLV_TYPE: u64 = 68;
+#[cfg(async_payments)]
+const STATIC_INVOICE_TLV_TYPE: u64 = 70;
/// A handler for an [`OnionMessage`] containing a BOLT 12 Offers message as its payload.
///
/// [`Refund`]: crate::offers::refund::Refund
Invoice(Bolt12Invoice),
+ #[cfg(async_payments)]
+ /// A [`StaticInvoice`] sent in response to an [`InvoiceRequest`].
+ StaticInvoice(StaticInvoice),
+
/// An error from handling an [`OffersMessage`].
InvoiceError(InvoiceError),
}
/// Returns whether `tlv_type` corresponds to a TLV record for Offers.
pub fn is_known_type(tlv_type: u64) -> bool {
match tlv_type {
- INVOICE_REQUEST_TLV_TYPE | INVOICE_TLV_TYPE | INVOICE_ERROR_TLV_TYPE => true,
+ INVOICE_REQUEST_TLV_TYPE
+ | INVOICE_TLV_TYPE
+ | INVOICE_ERROR_TLV_TYPE => true,
+ #[cfg(async_payments)]
+ STATIC_INVOICE_TLV_TYPE => true,
_ => false,
}
}
match tlv_type {
INVOICE_REQUEST_TLV_TYPE => Ok(Self::InvoiceRequest(InvoiceRequest::try_from(bytes)?)),
INVOICE_TLV_TYPE => Ok(Self::Invoice(Bolt12Invoice::try_from(bytes)?)),
+ #[cfg(async_payments)]
+ STATIC_INVOICE_TLV_TYPE => Ok(Self::StaticInvoice(StaticInvoice::try_from(bytes)?)),
_ => Err(Bolt12ParseError::Decode(DecodeError::InvalidValue)),
}
}
OffersMessage::Invoice(message) => {
write!(f, "{:?}", message.as_tlv_stream())
}
+ #[cfg(async_payments)]
+ OffersMessage::StaticInvoice(message) => {
+ write!(f, "{:?}", message)
+ }
OffersMessage::InvoiceError(message) => {
write!(f, "{:?}", message)
}
match self {
OffersMessage::InvoiceRequest(_) => INVOICE_REQUEST_TLV_TYPE,
OffersMessage::Invoice(_) => INVOICE_TLV_TYPE,
+ #[cfg(async_payments)]
+ OffersMessage::StaticInvoice(_) => STATIC_INVOICE_TLV_TYPE,
OffersMessage::InvoiceError(_) => INVOICE_ERROR_TLV_TYPE,
}
}
match &self {
OffersMessage::InvoiceRequest(_) => "Invoice Request",
OffersMessage::Invoice(_) => "Invoice",
+ #[cfg(async_payments)]
+ OffersMessage::StaticInvoice(_) => "Static Invoice",
OffersMessage::InvoiceError(_) => "Invoice Error",
}
}
match self {
OffersMessage::InvoiceRequest(message) => message.write(w),
OffersMessage::Invoice(message) => message.write(w),
+ #[cfg(async_payments)]
+ OffersMessage::StaticInvoice(message) => message.write(w),
OffersMessage::InvoiceError(message) => message.write(w),
}
}
use crate::blinded_path::utils::Padding;
use crate::ln::msgs::DecodeError;
use crate::ln::onion_utils;
+#[cfg(async_payments)]
+use super::async_payments::AsyncPaymentsMessage;
use super::messenger::CustomOnionMessageHandler;
use super::offers::OffersMessage;
use crate::crypto::streams::{ChaChaPolyReadAdapter, ChaChaPolyWriteAdapter};
pub enum ParsedOnionMessageContents<T: OnionMessageContents> {
/// A message related to BOLT 12 Offers.
Offers(OffersMessage),
+ /// A message related to async payments.
+ #[cfg(async_payments)]
+ AsyncPayments(AsyncPaymentsMessage),
/// A custom onion message specified by the user.
Custom(T),
}
fn tlv_type(&self) -> u64 {
match self {
&ParsedOnionMessageContents::Offers(ref msg) => msg.tlv_type(),
+ #[cfg(async_payments)]
+ &ParsedOnionMessageContents::AsyncPayments(ref msg) => msg.tlv_type(),
&ParsedOnionMessageContents::Custom(ref msg) => msg.tlv_type(),
}
}
fn msg_type(&self) -> &'static str {
match self {
ParsedOnionMessageContents::Offers(ref msg) => msg.msg_type(),
+ #[cfg(async_payments)]
+ ParsedOnionMessageContents::AsyncPayments(ref msg) => msg.msg_type(),
ParsedOnionMessageContents::Custom(ref msg) => msg.msg_type(),
}
}
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
match self {
ParsedOnionMessageContents::Offers(msg) => Ok(msg.write(w)?),
+ #[cfg(async_payments)]
+ ParsedOnionMessageContents::AsyncPayments(msg) => Ok(msg.write(w)?),
ParsedOnionMessageContents::Custom(msg) => Ok(msg.write(w)?),
}
}
message = Some(ParsedOnionMessageContents::Offers(msg));
Ok(true)
},
+ #[cfg(async_payments)]
+ tlv_type if AsyncPaymentsMessage::is_known_type(tlv_type) => {
+ let msg = AsyncPaymentsMessage::read(msg_reader, tlv_type)?;
+ message = Some(ParsedOnionMessageContents::AsyncPayments(msg));
+ Ok(true)
+ },
_ => match handler.read_custom_message(msg_type, msg_reader)? {
Some(msg) => {
message = Some(ParsedOnionMessageContents::Custom(msg));
// recipient's node_id.
const MIN_PEER_CHANNELS: usize = 3;
+ let has_one_peer = first_hops
+ .first()
+ .map(|details| details.counterparty.node_id)
+ .map(|node_id| first_hops
+ .iter()
+ .skip(1)
+ .all(|details| details.counterparty.node_id == node_id)
+ )
+ .unwrap_or(false);
+
let network_graph = self.network_graph.deref().read_only();
+ let is_recipient_announced =
+ network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient));
+
let paths = first_hops.into_iter()
.filter(|details| details.counterparty.features.supports_route_blinding())
.filter(|details| amount_msats <= details.inbound_capacity_msat)
.filter(|details| amount_msats >= details.inbound_htlc_minimum_msat.unwrap_or(0))
.filter(|details| amount_msats <= details.inbound_htlc_maximum_msat.unwrap_or(u64::MAX))
+ // Limit to peers with announced channels unless the recipient is unannounced.
.filter(|details| network_graph
.node(&NodeId::from_pubkey(&details.counterparty.node_id))
- .map(|node_info| node_info.channels.len() >= MIN_PEER_CHANNELS)
- .unwrap_or(false)
+ .map(|node| !is_recipient_announced || node.channels.len() >= MIN_PEER_CHANNELS)
+ // Allow payments directly with the only peer when unannounced.
+ .unwrap_or(!is_recipient_announced && has_one_peer)
)
.filter_map(|details| {
let short_channel_id = match details.get_inbound_payment_scid() {
}
}
+ /// Returns mutable access to the blinded route hints, if any.
+ ///
+ /// `Clear` payment parameters carry no blinded hints, so an empty slice is
+ /// returned for that variant.
+ pub(crate) fn blinded_route_hints_mut(&mut self) -> &mut [(BlindedPayInfo, BlindedPath)] {
+ match self {
+ Self::Blinded { route_hints, .. } => &mut route_hints[..],
+ Self::Clear { .. } => &mut []
+ }
+ }
+
fn unblinded_route_hints(&self) -> &[RouteHint] {
match self {
Self::Blinded { .. } => &[],
$($crate::_init_tlv_field_var!($tlvfield, $fieldty);)*
$crate::decode_tlv_stream!(r, {$(($type, $tlvfield, $fieldty)),*});
Ok(Self {
- $($field),*,
+ $($field,)*
$($tlvfield),*
})
}
fn simple_test_tlv_write() {
do_simple_test_tlv_write().unwrap();
}
+
+ // A message with no fields and no TLV records, used to exercise
+ // `impl_writeable_msg!` with both field lists empty.
+ #[derive(Debug, Eq, PartialEq)]
+ struct EmptyMsg {}
+ impl_writeable_msg!(EmptyMsg, {}, {});
+
+ #[test]
+ fn impl_writeable_msg_empty() {
+ let msg = EmptyMsg {};
+ // An empty message must serialize to zero bytes...
+ let mut encoded_msg = msg.encode();
+ assert!(encoded_msg.is_empty());
+ // ...and reading those zero bytes back must yield an equal value.
+ let mut encoded_msg_stream = Cursor::new(&mut encoded_msg);
+ let decoded_msg: EmptyMsg = Readable::read(&mut encoded_msg_stream).unwrap();
+ assert_eq!(msg, decoded_msg);
+ }
}
#[allow(unused_imports)]
use crate::prelude::*;
-use core::cmp;
+use core::{cmp, fmt};
use crate::sync::{Mutex, Arc};
#[cfg(test)] use crate::sync::MutexGuard;
/// Channel state used for policy enforcement
pub state: Arc<Mutex<EnforcementState>>,
pub disable_revocation_policy_check: bool,
- /// When `true` (the default), the signer will respond immediately with signatures. When `false`,
- /// the signer will return an error indicating that it is unavailable.
- pub available: Arc<Mutex<bool>>,
+ /// Set of signer operations that are disabled. If an operation is disabled,
+ /// the signer will return `Err` when the corresponding method is called.
+ pub disabled_signer_ops: Arc<Mutex<HashSet<SignerOp>>>,
+}
+
+/// Identifies a single channel-signer operation. Operations present in
+/// `TestChannelSigner::disabled_signer_ops` cause the corresponding signer
+/// method to return `Err`, simulating an unavailable/asynchronous signer.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum SignerOp {
+ GetPerCommitmentPoint,
+ ReleaseCommitmentSecret,
+ ValidateHolderCommitment,
+ SignCounterpartyCommitment,
+ ValidateCounterpartyRevocation,
+ SignHolderCommitment,
+ SignJusticeRevokedOutput,
+ SignJusticeRevokedHtlc,
+ SignHolderHtlcTransaction,
+ SignCounterpartyHtlcTransaction,
+ SignClosingTransaction,
+ SignHolderAnchorInput,
+ SignChannelAnnouncementWithFundingKey,
+}
+
+// Formats each operation as the snake_case name of the signer method it
+// corresponds to.
+impl fmt::Display for SignerOp {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ SignerOp::GetPerCommitmentPoint => write!(f, "get_per_commitment_point"),
+ SignerOp::ReleaseCommitmentSecret => write!(f, "release_commitment_secret"),
+ SignerOp::ValidateHolderCommitment => write!(f, "validate_holder_commitment"),
+ SignerOp::SignCounterpartyCommitment => write!(f, "sign_counterparty_commitment"),
+ SignerOp::ValidateCounterpartyRevocation => write!(f, "validate_counterparty_revocation"),
+ SignerOp::SignHolderCommitment => write!(f, "sign_holder_commitment"),
+ SignerOp::SignJusticeRevokedOutput => write!(f, "sign_justice_revoked_output"),
+ SignerOp::SignJusticeRevokedHtlc => write!(f, "sign_justice_revoked_htlc"),
+ SignerOp::SignHolderHtlcTransaction => write!(f, "sign_holder_htlc_transaction"),
+ SignerOp::SignCounterpartyHtlcTransaction => write!(f, "sign_counterparty_htlc_transaction"),
+ SignerOp::SignClosingTransaction => write!(f, "sign_closing_transaction"),
+ SignerOp::SignHolderAnchorInput => write!(f, "sign_holder_anchor_input"),
+ SignerOp::SignChannelAnnouncementWithFundingKey => write!(f, "sign_channel_announcement_with_funding_key"),
+ }
+ }
impl PartialEq for TestChannelSigner {
inner,
state,
disable_revocation_policy_check: false,
- available: Arc::new(Mutex::new(true)),
+ disabled_signer_ops: Arc::new(Mutex::new(new_hash_set())),
}
}
inner,
state,
disable_revocation_policy_check,
- available: Arc::new(Mutex::new(true)),
+ disabled_signer_ops: Arc::new(Mutex::new(new_hash_set())),
}
}
self.state.lock().unwrap()
}
- /// Marks the signer's availability.
- ///
- /// When `true`, methods are forwarded to the underlying signer as normal. When `false`, some
- /// methods will return `Err` indicating that the signer is unavailable. Intended to be used for
- /// testing asynchronous signing.
- pub fn set_available(&self, available: bool) {
- *self.available.lock().unwrap() = available;
+ /// Re-enables a previously disabled signer operation by removing it from
+ /// the disabled set.
+ // NOTE(review): `disabled_signer_ops` is behind `Arc<Mutex<_>>`, so `&self`
+ // would suffice here — confirm whether `&mut self` is intentional.
+ pub fn enable_op(&mut self, signer_op: SignerOp) {
+ self.disabled_signer_ops.lock().unwrap().remove(&signer_op);
+ }
+
+ /// Disables a signer operation; the corresponding signer method will return
+ /// `Err` until the operation is re-enabled via `enable_op`.
+ pub fn disable_op(&mut self, signer_op: SignerOp) {
+ self.disabled_signer_ops.lock().unwrap().insert(signer_op);
+ }
+
+ // An operation is available iff it has not been disabled via `disable_op`.
+ fn is_signer_available(&self, signer_op: SignerOp) -> bool {
+ !self.disabled_signer_ops.lock().unwrap().contains(&signer_op)
+ }
}
}
fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> {
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::ValidateCounterpartyRevocation) {
return Err(());
}
let mut state = self.state.lock().unwrap();
self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx);
{
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignCounterpartyCommitment) {
return Err(());
}
let mut state = self.state.lock().unwrap();
}
fn sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignHolderCommitment) {
return Err(());
}
let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx);
}
fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignJusticeRevokedOutput) {
return Err(());
}
Ok(EcdsaChannelSigner::sign_justice_revoked_output(&self.inner, justice_tx, input, amount, per_commitment_key, secp_ctx).unwrap())
}
fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignJusticeRevokedHtlc) {
return Err(());
}
Ok(EcdsaChannelSigner::sign_justice_revoked_htlc(&self.inner, justice_tx, input, amount, per_commitment_key, htlc, secp_ctx).unwrap())
&self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor,
secp_ctx: &Secp256k1<secp256k1::All>
) -> Result<Signature, ()> {
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignHolderHtlcTransaction) {
return Err(());
}
let state = self.state.lock().unwrap();
}
fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignCounterpartyHtlcTransaction) {
return Err(());
}
Ok(EcdsaChannelSigner::sign_counterparty_htlc_transaction(&self.inner, htlc_tx, input, amount, per_commitment_point, htlc, secp_ctx).unwrap())
// As long as our minimum dust limit is enforced and is greater than our anchor output
// value, an anchor output can only have an index within [0, 1].
assert!(anchor_tx.input[input].previous_output.vout == 0 || anchor_tx.input[input].previous_output.vout == 1);
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignHolderAnchorInput) {
return Err(());
}
EcdsaChannelSigner::sign_holder_anchor_input(&self.inner, anchor_tx, input, secp_ctx)
use bitcoin::psbt::Psbt;
use bitcoin::Sequence;
+use super::test_channel_signer::SignerOp;
+
pub fn pubkey(byte: u8) -> PublicKey {
let secp_ctx = Secp256k1::new();
PublicKey::from_secret_key(&secp_ctx, &privkey(byte))
///
/// [`ChannelMonitor`]: channelmonitor::ChannelMonitor
pub offchain_monitor_updates: Mutex<HashMap<OutPoint, HashSet<u64>>>,
+ /// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
+ /// monitor's funding outpoint here.
+ pub chain_sync_monitor_persistences: Mutex<VecDeque<OutPoint>>
}
impl TestPersister {
pub fn new() -> Self {
Self {
update_rets: Mutex::new(VecDeque::new()),
offchain_monitor_updates: Mutex::new(new_hash_map()),
+ chain_sync_monitor_persistences: Mutex::new(VecDeque::new())
}
}
ret = update_ret;
}
- if let Some(update) = update {
+ if let Some(update) = update {
self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert(new_hash_set()).insert(update.update_id);
+ } else {
+ self.chain_sync_monitor_persistences.lock().unwrap().push_back(funding_txo);
}
ret
}
fn archive_persisted_channel(&self, funding_txo: OutPoint) {
- // remove the channel from the offchain_monitor_updates map
+ // remove the channel from the offchain_monitor_updates and chain_sync_monitor_persistences.
self.offchain_monitor_updates.lock().unwrap().remove(&funding_txo);
+ // `retain` drops every queued chain-sync persistence entry for this outpoint,
+ // since the same outpoint may have been pushed multiple times.
+ self.chain_sync_monitor_persistences.lock().unwrap().retain(|x| x != &funding_txo);
}
}
enforcement_states: Mutex<HashMap<[u8;32], Arc<Mutex<EnforcementState>>>>,
expectations: Mutex<Option<VecDeque<OnGetShutdownScriptpubkey>>>,
pub unavailable_signers: Mutex<HashSet<[u8; 32]>>,
+ pub unavailable_signers_ops: Mutex<HashMap<[u8; 32], HashSet<SignerOp>>>,
}
impl EntropySource for TestKeysInterface {
fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> TestChannelSigner {
let keys = self.backing.derive_channel_signer(channel_value_satoshis, channel_keys_id);
let state = self.make_enforcement_state_cell(keys.commitment_seed);
- let signer = TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check);
- if self.unavailable_signers.lock().unwrap().contains(&channel_keys_id) {
- signer.set_available(false);
+ let mut signer = TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check);
+ // Apply any per-channel operation disabling configured in
+ // `unavailable_signers_ops` before handing the signer out, so tests can
+ // simulate specific signer methods being unavailable.
+ if let Some(ops) = self.unavailable_signers_ops.lock().unwrap().get(&channel_keys_id) {
+ for &op in ops {
+ signer.disable_op(op);
+ }
}
signer
}
enforcement_states: Mutex::new(new_hash_map()),
expectations: Mutex::new(None),
unavailable_signers: Mutex::new(new_hash_set()),
+ unavailable_signers_ops: Mutex::new(new_hash_map()),
}
}
-./bench/benches/bench.rs
./lightning-background-processor/src/lib.rs
./lightning-block-sync/src/convert.rs
./lightning-block-sync/src/gossip.rs
./lightning-persister/src/lib.rs
./lightning-persister/src/test_utils.rs
./lightning-persister/src/utils.rs
-./lightning-rapid-gossip-sync/src/error.rs
-./lightning-rapid-gossip-sync/src/lib.rs
-./lightning-rapid-gossip-sync/src/processing.rs
./lightning/src/blinded_path/message.rs
./lightning/src/blinded_path/mod.rs
./lightning/src/blinded_path/payment.rs