let inner: InMemorySigner = ReadableArgs::read(&mut reader, self)?;
let state = self.make_enforcement_state_cell(inner.commitment_seed);
- Ok(TestChannelSigner {
- inner,
- state,
- disable_revocation_policy_check: false,
- available: Arc::new(Mutex::new(true)),
- })
+ Ok(TestChannelSigner::new_with_revoked(inner, state, false))
}
fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
// process the now-pending HTLC forward
ext_from_hex("07", &mut test);
- // Two feerate requests to check dust exposure
- ext_from_hex("00fd00fd", &mut test);
+ // Three feerate requests to check dust exposure
+ ext_from_hex("00fd00fd00fd", &mut test);
// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7: UpdateHTLCs event for node 03020000 with 1 HTLCs for channel 3f000000)
// we respond with commitment_signed then revoke_and_ack (a weird, but valid, order)
// process the now-pending HTLC forward
ext_from_hex("07", &mut test);
- // Two feerate requests to check dust exposure
- ext_from_hex("00fd00fd", &mut test);
+ // Three feerate requests to check dust exposure
+ ext_from_hex("00fd00fd00fd", &mut test);
// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
// we respond with revoke_and_ack, then commitment_signed, then update_fail_htlc
// process the now-pending HTLC forward
ext_from_hex("07", &mut test);
- // Two feerate requests to check dust exposure
- ext_from_hex("00fd00fd", &mut test);
+ // Three feerate requests to check dust exposure
+ ext_from_hex("00fd00fd00fd", &mut test);
// client now sends id 1 update_add_htlc and commitment_signed (CHECK 7 duplicate)
// connect a block with one transaction of len 125
#![deny(rustdoc::broken_intra_doc_links)]
#![deny(rustdoc::private_intra_doc_links)]
-
#![deny(missing_docs)]
#![deny(unsafe_code)]
#![deny(non_upper_case_globals)]
#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
-#[cfg(ldk_bench)] extern crate criterion;
+#[cfg(ldk_bench)]
+extern crate criterion;
#[cfg(not(feature = "std"))]
extern crate alloc;
-#[cfg(feature = "std")]
-use std::fs::File;
use core::ops::Deref;
use core::sync::atomic::{AtomicBool, Ordering};
+#[cfg(feature = "std")]
+use std::fs::File;
use lightning::io;
use lightning::ln::msgs::{DecodeError, LightningError};
/// See [crate-level documentation] for usage.
///
/// [crate-level documentation]: crate
-pub struct RapidGossipSync<NG: Deref<Target=NetworkGraph<L>>, L: Deref>
-where L::Target: Logger {
+pub struct RapidGossipSync<NG: Deref<Target = NetworkGraph<L>>, L: Deref>
+where
+ L::Target: Logger,
+{
network_graph: NG,
logger: L,
- is_initial_sync_complete: AtomicBool
+ is_initial_sync_complete: AtomicBool,
}
-impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L::Target: Logger {
+impl<NG: Deref<Target = NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L>
+where
+ L::Target: Logger,
+{
/// Instantiate a new [`RapidGossipSync`] instance.
pub fn new(network_graph: NG, logger: L) -> Self {
- Self {
- network_graph,
- logger,
- is_initial_sync_complete: AtomicBool::new(false)
- }
+ Self { network_graph, logger, is_initial_sync_complete: AtomicBool::new(false) }
}
/// Sync gossip data from a file.
///
#[cfg(feature = "std")]
pub fn sync_network_graph_with_file_path(
- &self,
- sync_path: &str,
+ &self, sync_path: &str,
) -> Result<u32, GraphSyncError> {
let mut file = File::open(sync_path)?;
self.update_network_graph_from_byte_stream(&mut file)
///
/// `update_data`: `&[u8]` binary stream that comprises the update data
/// `current_time_unix`: `Option<u64>` optional current timestamp to verify data age
- pub fn update_network_graph_no_std(&self, update_data: &[u8], current_time_unix: Option<u64>) -> Result<u32, GraphSyncError> {
+ pub fn update_network_graph_no_std(
+ &self, update_data: &[u8], current_time_unix: Option<u64>,
+ ) -> Result<u32, GraphSyncError> {
let mut read_cursor = io::Cursor::new(update_data);
self.update_network_graph_from_byte_stream_no_std(&mut read_cursor, current_time_unix)
}
use bitcoin::Network;
+ use crate::{GraphSyncError, RapidGossipSync};
use lightning::ln::msgs::DecodeError;
use lightning::routing::gossip::NetworkGraph;
use lightning::util::test_utils::TestLogger;
- use crate::{GraphSyncError, RapidGossipSync};
#[test]
fn test_sync_from_file() {
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let start = std::time::Instant::now();
- let sync_result = rapid_sync
- .sync_network_graph_with_file_path("./res/full_graph.lngossip");
+ let sync_result = rapid_sync.sync_network_graph_with_file_path("./res/full_graph.lngossip");
if let Err(GraphSyncError::DecodeError(DecodeError::Io(io_error))) = &sync_result {
let error_string = format!("Input file lightning-rapid-gossip-sync/res/full_graph.lngossip is missing! Download it from https://bitcoin.ninja/ldk-compressed_graph-285cb27df79-2022-07-21.bin\n\n{:?}", io_error);
#[cfg(not(require_route_graph_test))]
use bitcoin::blockdata::constants::ChainHash;
use bitcoin::secp256k1::PublicKey;
-use lightning::ln::msgs::{DecodeError, ErrorAction, LightningError, SocketAddress, UnsignedChannelUpdate, UnsignedNodeAnnouncement};
+use lightning::io;
+use lightning::ln::msgs::{
+ DecodeError, ErrorAction, LightningError, SocketAddress, UnsignedChannelUpdate,
+ UnsignedNodeAnnouncement,
+};
use lightning::routing::gossip::{NetworkGraph, NodeAlias, NodeId};
use lightning::util::logger::Logger;
-use lightning::{log_debug, log_warn, log_trace, log_given_level, log_gossip};
use lightning::util::ser::{BigSize, FixedLengthReader, Readable};
-use lightning::io;
+use lightning::{log_debug, log_given_level, log_gossip, log_trace, log_warn};
use crate::{GraphSyncError, RapidGossipSync};
use std::time::{SystemTime, UNIX_EPOCH};
#[cfg(all(not(feature = "std"), not(test)))]
-use alloc::{vec::Vec, borrow::ToOwned};
+use alloc::{borrow::ToOwned, vec::Vec};
use lightning::ln::features::NodeFeatures;
/// The purpose of this prefix is to identify the serialization format, should other rapid gossip
/// suggestion.
const STALE_RGS_UPDATE_AGE_LIMIT_SECS: u64 = 60 * 60 * 24 * 14;
-impl<NG: Deref<Target=NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L> where L::Target: Logger {
+impl<NG: Deref<Target = NetworkGraph<L>>, L: Deref> RapidGossipSync<NG, L>
+where
+ L::Target: Logger,
+{
#[cfg(feature = "std")]
pub(crate) fn update_network_graph_from_byte_stream<R: io::Read>(
- &self,
- read_cursor: &mut R,
+ &self, read_cursor: &mut R,
) -> Result<u32, GraphSyncError> {
#[allow(unused_mut, unused_assignments)]
let mut current_time_unix = None;
{
// Note that many tests rely on being able to set arbitrarily old timestamps, thus we
// disable this check during tests!
- current_time_unix = Some(SystemTime::now().duration_since(UNIX_EPOCH).expect("Time must be > 1970").as_secs());
+ current_time_unix = Some(
+ SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .expect("Time must be > 1970")
+ .as_secs(),
+ );
}
self.update_network_graph_from_byte_stream_no_std(read_cursor, current_time_unix)
}
pub(crate) fn update_network_graph_from_byte_stream_no_std<R: io::Read>(
- &self,
- mut read_cursor: &mut R,
- current_time_unix: Option<u64>
+ &self, mut read_cursor: &mut R, current_time_unix: Option<u64>,
) -> Result<u32, GraphSyncError> {
log_trace!(self.logger, "Processing RGS data...");
let mut protocol_prefix = [0u8; 3];
let chain_hash: ChainHash = Readable::read(read_cursor)?;
let ng_chain_hash = self.network_graph.get_chain_hash();
if chain_hash != ng_chain_hash {
- return Err(
- LightningError {
- err: "Rapid Gossip Sync data's chain hash does not match the network graph's".to_owned(),
- action: ErrorAction::IgnoreError,
- }.into()
- );
+ return Err(LightningError {
+ err: "Rapid Gossip Sync data's chain hash does not match the network graph's"
+ .to_owned(),
+ action: ErrorAction::IgnoreError,
+ }
+ .into());
}
let latest_seen_timestamp: u32 = Readable::read(read_cursor)?;
if let Some(time) = current_time_unix {
- if (latest_seen_timestamp as u64) < time.saturating_sub(STALE_RGS_UPDATE_AGE_LIMIT_SECS) {
- return Err(LightningError{err: "Rapid Gossip Sync data is more than two weeks old".to_owned(), action: ErrorAction::IgnoreError}.into());
+ if (latest_seen_timestamp as u64) < time.saturating_sub(STALE_RGS_UPDATE_AGE_LIMIT_SECS)
+ {
+ return Err(LightningError {
+ err: "Rapid Gossip Sync data is more than two weeks old".to_owned(),
+ action: ErrorAction::IgnoreError,
+ }
+ .into());
}
}
excess_data: Vec::new(),
};
- read_only_network_graph.nodes()
+ read_only_network_graph
+ .nodes()
.get(¤t_node_id)
.and_then(|node| node.announcement_info.as_ref())
.map(|info| {
let mut node_addresses: Vec<SocketAddress> = Vec::new();
for address_index in 0..address_count {
let current_byte_count: u8 = Readable::read(read_cursor)?;
- let mut address_reader = FixedLengthReader::new(&mut read_cursor, current_byte_count as u64);
+ let mut address_reader =
+ FixedLengthReader::new(&mut read_cursor, current_byte_count as u64);
if let Ok(current_address) = Readable::read(&mut address_reader) {
node_addresses.push(current_address);
if address_reader.bytes_remain() {
log_gossip!(
self.logger,
"Failure to parse address at index {} for node ID {}",
- address_index, current_node_id
+ address_index,
+ current_node_id
);
address_reader.eat_remaining()?;
}
if has_additional_data {
let additional_data: Vec<u8> = Readable::read(read_cursor)?;
- log_gossip!(self.logger, "Ignoring {} bytes of additional data in node announcement", additional_data.len());
+ log_gossip!(
+ self.logger,
+ "Ignoring {} bytes of additional data in node announcement",
+ additional_data.len()
+ );
}
}
} else {
// handle SCID
let scid_delta: BigSize = Readable::read(read_cursor)?;
- let short_channel_id = previous_scid
- .checked_add(scid_delta.0)
- .ok_or(DecodeError::InvalidValue)?;
+ let short_channel_id =
+ previous_scid.checked_add(scid_delta.0).ok_or(DecodeError::InvalidValue)?;
previous_scid = short_channel_id;
let node_id_1_index: BigSize = Readable::read(read_cursor)?;
let mut node_id_2_index: BigSize = Readable::read(read_cursor)?;
let has_additional_data = (node_id_2_index.0 & (1 << 63)) > 0;
- node_id_2_index.0 &= !(1 << 63); // ensure 63rd bit isn't set
+ // ensure 63rd bit isn't set
+ node_id_2_index.0 &= !(1 << 63);
if max(node_id_1_index.0, node_id_2_index.0) >= node_id_count as u64 {
return Err(DecodeError::InvalidValue.into());
let node_id_1 = node_ids[node_id_1_index.0 as usize];
let node_id_2 = node_ids[node_id_2_index.0 as usize];
- log_gossip!(self.logger, "Adding channel {} from RGS announcement at {}",
- short_channel_id, latest_seen_timestamp);
+ log_gossip!(
+ self.logger,
+ "Adding channel {} from RGS announcement at {}",
+ short_channel_id,
+ latest_seen_timestamp
+ );
let announcement_result = network_graph.add_channel_from_partial_announcement(
short_channel_id,
if let ErrorAction::IgnoreDuplicateGossip = lightning_error.action {
// everything is fine, just a duplicate channel announcement
} else {
- log_warn!(self.logger, "Failed to process channel announcement: {:?}", lightning_error);
+ log_warn!(
+ self.logger,
+ "Failed to process channel announcement: {:?}",
+ lightning_error
+ );
return Err(lightning_error.into());
}
}
if version >= 2 && has_additional_data {
// forwards compatibility
let additional_data: Vec<u8> = Readable::read(read_cursor)?;
- log_gossip!(self.logger, "Ignoring {} bytes of additional data in channel announcement", additional_data.len());
+ log_gossip!(
+ self.logger,
+ "Ignoring {} bytes of additional data in channel announcement",
+ additional_data.len()
+ );
}
}
for modification in node_modifications {
match network_graph.update_node_from_unsigned_announcement(&modification) {
- Ok(_) => {}
- Err(LightningError { action: ErrorAction::IgnoreDuplicateGossip, .. }) => {}
+ Ok(_) => {},
+ Err(LightningError { action: ErrorAction::IgnoreDuplicateGossip, .. }) => {},
Err(LightningError { action: ErrorAction::IgnoreAndLog(level), err }) => {
- log_given_level!(self.logger, level, "Failed to apply node announcement: {:?}", err);
- }
+ log_given_level!(
+ self.logger,
+ level,
+ "Failed to apply node announcement: {:?}",
+ err
+ );
+ },
Err(LightningError { action: ErrorAction::IgnoreError, err }) => {
log_gossip!(self.logger, "Failed to apply node announcement: {:?}", err);
- }
+ },
Err(e) => return Err(e.into()),
}
}
- previous_scid = 0; // updates start at a new scid
+ // updates start at a new scid
+ previous_scid = 0;
let update_count: u32 = Readable::read(read_cursor)?;
log_debug!(self.logger, "Processing RGS update from {} with {} nodes, {} channel announcements and {} channel updates.",
for _ in 0..update_count {
let scid_delta: BigSize = Readable::read(read_cursor)?;
- let short_channel_id = previous_scid
- .checked_add(scid_delta.0)
- .ok_or(DecodeError::InvalidValue)?;
+ let short_channel_id =
+ previous_scid.checked_add(scid_delta.0).ok_or(DecodeError::InvalidValue)?;
previous_scid = short_channel_id;
let channel_flags: u8 = Readable::read(read_cursor)?;
if scid_delta.0 == 0 && is_same_direction_update {
// this is additional data for forwards compatibility
let additional_data: Vec<u8> = Readable::read(read_cursor)?;
- log_gossip!(self.logger, "Ignoring {} bytes of additional data in channel update", additional_data.len());
+ log_gossip!(
+ self.logger,
+ "Ignoring {} bytes of additional data in channel update",
+ additional_data.len()
+ );
continue;
}
}
if (channel_flags & 0b_1000_0000) != 0 {
// incremental update, field flags will indicate mutated values
let read_only_network_graph = network_graph.read_only();
- if let Some(directional_info) =
- read_only_network_graph.channels().get(&short_channel_id)
+ if let Some(directional_info) = read_only_network_graph
+ .channels()
+ .get(&short_channel_id)
.and_then(|channel| channel.get_directional_info(channel_flags))
{
synthetic_update.cltv_expiry_delta = directional_info.cltv_expiry_delta;
synthetic_update.htlc_minimum_msat = directional_info.htlc_minimum_msat;
synthetic_update.htlc_maximum_msat = directional_info.htlc_maximum_msat;
synthetic_update.fee_base_msat = directional_info.fees.base_msat;
- synthetic_update.fee_proportional_millionths = directional_info.fees.proportional_millionths;
+ synthetic_update.fee_proportional_millionths =
+ directional_info.fees.proportional_millionths;
} else {
log_trace!(self.logger,
"Skipping application of channel update for chan {} with flags {} as original data is missing.",
continue;
}
- log_gossip!(self.logger, "Updating channel {} with flags {} from RGS announcement at {}",
- short_channel_id, channel_flags, latest_seen_timestamp);
+ log_gossip!(
+ self.logger,
+ "Updating channel {} with flags {} from RGS announcement at {}",
+ short_channel_id,
+ channel_flags,
+ latest_seen_timestamp
+ );
match network_graph.update_channel_unsigned(&synthetic_update) {
Ok(_) => {},
Err(LightningError { action: ErrorAction::IgnoreDuplicateGossip, .. }) => {},
Err(LightningError { action: ErrorAction::IgnoreAndLog(level), err }) => {
- log_given_level!(self.logger, level, "Failed to apply channel update: {:?}", err);
+ log_given_level!(
+ self.logger,
+ level,
+ "Failed to apply channel update: {:?}",
+ err
+ );
},
Err(LightningError { action: ErrorAction::IgnoreError, .. }) => {},
Err(e) => return Err(e.into()),
use crate::{GraphSyncError, RapidGossipSync};
const VALID_RGS_BINARY: [u8; 300] = [
- 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
- 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
- 0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
- 187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
- 157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
- 88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
- 204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
- 181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
- 110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
- 76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
- 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 4, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
- 0, 0, 0, 1, 0, 0, 0, 0, 29, 129, 25, 192, 255, 8, 153, 192, 0, 2, 27, 0, 0, 60, 0, 0,
- 0, 0, 0, 0, 0, 1, 0, 0, 0, 100, 0, 0, 2, 224, 0, 0, 0, 0, 58, 85, 116, 216, 0, 29, 0,
- 0, 0, 1, 0, 0, 0, 125, 0, 0, 0, 0, 58, 85, 116, 216, 255, 2, 68, 226, 0, 6, 11, 0, 1,
- 0, 0, 1,
+ 76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247, 79,
+ 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218, 0, 0, 0,
+ 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251, 187, 172, 38,
+ 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125, 157, 176, 223,
+ 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136, 88, 216, 115, 11,
+ 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106, 204, 131, 186, 35,
+ 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138, 181, 64, 187, 103, 127,
+ 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175, 110, 32, 237, 0, 217, 90,
+ 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128, 76, 97, 0, 0, 0, 2, 0, 0, 255,
+ 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68, 226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 4,
+ 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232, 0, 0, 0, 1, 0, 0, 0, 0, 29, 129, 25, 192,
+ 255, 8, 153, 192, 0, 2, 27, 0, 0, 60, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 100, 0, 0, 2, 224,
+ 0, 0, 0, 0, 58, 85, 116, 216, 0, 29, 0, 0, 0, 1, 0, 0, 0, 125, 0, 0, 0, 0, 58, 85, 116,
+ 216, 255, 2, 68, 226, 0, 6, 11, 0, 1, 0, 0, 1,
];
const VALID_BINARY_TIMESTAMP: u64 = 1642291930;
76, 68, 75, 2, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 102, 97, 206, 240,
0, 0, 0, 0, 2, 63, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5,
- 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, 5, 38, 4, 1,
+ 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, 5, 38, 4,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 0, 2, 3, 0, 4, 7, 1, 127, 0, 0, 1, 37, 163, 14, 5, 10, 103, 111, 111, 103, 108,
- 101, 46, 99, 111, 109, 1, 187, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5,
- 57, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 2, 23, 48, 62, 77, 75, 108, 209,
- 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201, 66,
- 51, 116, 196, 81, 167, 37, 77, 7, 102, 0, 0, 2, 25, 48, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0,
- 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
- 0, 0, 1
+ 1, 1, 1, 0, 2, 3, 0, 4, 7, 1, 127, 0, 0, 1, 37, 163, 14, 5, 10, 103, 111, 111, 103,
+ 108, 101, 46, 99, 111, 109, 1, 187, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 5, 57, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 2, 23, 48, 62, 77, 75, 108,
+ 209, 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201,
+ 66, 51, 116, 196, 81, 167, 37, 77, 7, 102, 0, 0, 2, 25, 48, 0, 0, 0, 1, 0, 0, 1, 0, 1,
+ 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 1,
];
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
{
// address node
- let node_id = NodeId::from_slice(&[3, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143]).unwrap();
+ let node_id = NodeId::from_slice(&[
+ 3, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30,
+ 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143,
+ ])
+ .unwrap();
let node = nodes.get(&node_id).unwrap();
let announcement_info = node.announcement_info.as_ref().unwrap();
let addresses = announcement_info.addresses();
{
// feature node
- let node_id = NodeId::from_slice(&[2, 77, 75, 108, 209, 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201, 66, 51, 116, 196, 81, 167, 37, 77, 7, 102]).unwrap();
+ let node_id = NodeId::from_slice(&[
+ 2, 77, 75, 108, 209, 54, 16, 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217,
+ 234, 216, 10, 201, 66, 51, 116, 196, 81, 167, 37, 77, 7, 102,
+ ])
+ .unwrap();
let node = nodes.get(&node_id).unwrap();
let announcement_info = node.announcement_info.as_ref().unwrap();
let features = announcement_info.features();
// assert_eq!(addresses.len(), 5);
}
- logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Failed to apply node announcement", 0);
+ logger.assert_log_contains(
+ "lightning_rapid_gossip_sync::processing",
+ "Failed to apply node announcement",
+ 0,
+ );
}
#[test]
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let example_input = vec![
- 76, 68, 75, 2, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247, 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 102, 105, 183, 240, 0, 0, 0, 0, 1, 63, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186, 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, 5, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 7, 1, 127, 0, 0, 1, 37, 163, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 57, 38, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 3, 0, 4, 14, 5, 10, 103, 111, 111, 103, 108, 101, 46, 99, 111, 109, 1, 187, 0, 2, 23, 48, 0, 0, 0, 0, 0, 0, 0, 0,
+ 76, 68, 75, 2, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+ 79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 102, 105, 183,
+ 240, 0, 0, 0, 0, 1, 63, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213, 170, 186,
+ 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143, 5, 13,
+ 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 7, 1, 127, 0, 0, 1, 37, 163, 19, 2, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5, 57, 38, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 3, 0, 4, 14, 5,
+ 10, 103, 111, 111, 103, 108, 101, 46, 99, 111, 109, 1, 187, 0, 2, 23, 48, 0, 0, 0, 0,
+ 0, 0, 0, 0,
];
let update_result = rapid_sync.update_network_graph_no_std(&example_input[..], None);
assert!(update_result.is_ok());
- logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Failed to apply node announcement: \"No existing channels for node_announcement\"", 1);
+ logger.assert_log_contains(
+ "lightning_rapid_gossip_sync::processing",
+ "Failed to apply node announcement: \"No existing channels for node_announcement\"",
+ 1,
+ );
}
#[test]
76, 68, 75, 2, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 102, 106, 12, 80,
1, 0, 2, 23, 48, 0, 0, 0, 3, 143, 27, 132, 197, 86, 123, 18, 100, 64, 153, 93, 62, 213,
- 170, 186, 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7, 143,
- 5, 38, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 0, 2, 3, 0, 4, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- 5, 57, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 7, 1, 127, 0, 0, 1, 37, 163, 14, 5,
- 10, 103, 111, 111, 103, 108, 101, 46, 99, 111, 109, 1, 187, 0, 255, 0, 0, 0, 0, 0, 0, 0,
+ 170, 186, 5, 101, 215, 30, 24, 52, 96, 72, 25, 255, 156, 23, 245, 233, 213, 221, 7,
+ 143, 5, 38, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 3, 0, 4, 19, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 5, 57, 13, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 7, 1, 127, 0, 0, 1, 37, 163,
+ 14, 5, 10, 103, 111, 111, 103, 108, 101, 46, 99, 111, 109, 1, 187, 0, 255, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 138, 77, 75, 108, 209, 54, 16,
+ 50, 202, 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201, 66, 51, 116,
+ 196, 81, 167, 37, 77, 7, 102, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 138, 77, 75, 108, 209, 54, 16, 50, 202,
- 155, 210, 174, 185, 217, 0, 170, 77, 69, 217, 234, 216, 10, 201, 66, 51, 116, 196, 81,
- 167, 37, 77, 7, 102, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 186, 83, 31, 230, 6, 129, 52, 80, 61, 39, 35, 19, 50, 39, 200,
+ 103, 172, 143, 166, 200, 60, 83, 126, 154, 68, 195, 197, 189, 189, 203, 31, 227, 55, 0,
+ 2, 22, 49, 0, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 186, 83, 31, 230, 6, 129, 52, 80, 61, 39, 35, 19, 50, 39, 200, 103, 172, 143,
- 166, 200, 60, 83, 126, 154, 68, 195, 197, 189, 189, 203, 31, 227, 55, 0, 2, 22, 49, 0,
- 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
- 0, 0, 1, 0, 255, 128, 0, 0, 0, 0, 0, 0, 1, 0, 147, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 0, 0, 0, 1, 0, 0, 1, 0, 255, 128, 0, 0, 0, 0, 0, 0, 1, 0, 147, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
- 23, 23, 23, 23, 23, 23, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 17, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
- 42, 42, 42, 42, 42, 42, 42, 0, 1, 0, 1, 0, 17, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
- 42, 42, 42, 42, 42, 42, 42
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 17, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 0, 1, 0, 1, 0, 17, 42, 42, 42, 42, 42, 42, 42,
+ 42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
];
let update_result = rapid_sync.update_network_graph_no_std(&example_input[..], None);
assert!(update_result.is_ok());
- logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Ignoring 255 bytes of additional data in node announcement", 3);
- logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Ignoring 147 bytes of additional data in channel announcement", 1);
- logger.assert_log_contains("lightning_rapid_gossip_sync::processing", "Ignoring 17 bytes of additional data in channel update", 1);
+ logger.assert_log_contains(
+ "lightning_rapid_gossip_sync::processing",
+ "Ignoring 255 bytes of additional data in node announcement",
+ 3,
+ );
+ logger.assert_log_contains(
+ "lightning_rapid_gossip_sync::processing",
+ "Ignoring 147 bytes of additional data in channel announcement",
+ 1,
+ );
+ logger.assert_log_contains(
+ "lightning_rapid_gossip_sync::processing",
+ "Ignoring 17 bytes of additional data in channel update",
+ 1,
+ );
}
#[test]
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
let initialization_result = rapid_sync.update_network_graph(&initialization_input[..]);
if initialization_result.is_err() {
- panic!(
- "Unexpected initialization result: {:?}",
- initialization_result
- )
+ panic!("Unexpected initialization result: {:?}", initialization_result)
}
assert_eq!(network_graph.read_only().channels().len(), 2);
0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 136, 0, 0, 0, 221, 255, 2,
68, 226, 0, 6, 11, 0, 1, 128,
];
- let update_result = rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]);
+ let update_result =
+ rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]);
if update_result.is_err() {
panic!("Unexpected update result: {:?}", update_result)
}
0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 136, 0, 0, 0, 221, 255, 2,
68, 226, 0, 6, 11, 0, 1, 128,
];
- let update_result_1 = rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]);
+ let update_result_1 =
+ rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]);
// Apply duplicate update
- let update_result_2 = rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]);
+ let update_result_2 =
+ rapid_sync.update_network_graph(&single_direction_incremental_update_input[..]);
assert!(update_result_1.is_ok());
assert!(update_result_2.is_ok());
}
assert_eq!(network_graph.read_only().channels().len(), 0);
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
- let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_nonpruning_time));
+ let update_result = rapid_sync
+ .update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_nonpruning_time));
assert!(update_result.is_ok());
assert_eq!(network_graph.read_only().channels().len(), 2);
}
assert_eq!(network_graph.read_only().channels().len(), 0);
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
- let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_nonpruning_time + 1));
+ let update_result = rapid_sync
+ .update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_nonpruning_time + 1));
assert!(update_result.is_ok());
assert_eq!(network_graph.read_only().channels().len(), 0);
}
assert_eq!(network_graph.read_only().channels().len(), 0);
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
- let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_succeeding_time));
+ let update_result = rapid_sync
+ .update_network_graph_no_std(&VALID_RGS_BINARY, Some(latest_succeeding_time));
assert!(update_result.is_ok());
assert_eq!(network_graph.read_only().channels().len(), 0);
}
assert_eq!(network_graph.read_only().channels().len(), 0);
let rapid_sync = RapidGossipSync::new(&network_graph, &logger);
- let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(earliest_failing_time));
+ let update_result = rapid_sync
+ .update_network_graph_no_std(&VALID_RGS_BINARY, Some(earliest_failing_time));
assert!(update_result.is_err());
if let Err(GraphSyncError::LightningError(lightning_error)) = update_result {
assert_eq!(
let update_result = rapid_sync.update_network_graph_no_std(&VALID_RGS_BINARY, Some(0));
assert!(update_result.is_err());
if let Err(GraphSyncError::LightningError(err)) = update_result {
- assert_eq!(err.err, "Rapid Gossip Sync data's chain hash does not match the network graph's");
+ assert_eq!(
+ err.err,
+ "Rapid Gossip Sync data's chain hash does not match the network graph's"
+ );
} else {
panic!("Unexpected update result: {:?}", update_result)
}
use core::ops::Deref;
/// An intermediate node, and possibly a short channel id leading to the next node.
-#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct ForwardNode {
/// This node's pubkey.
pub node_id: PublicKey,
// Advance the blinded onion message path by one hop, so make the second hop into the new
// introduction node.
+//
+// Will only modify `path` when returning `Ok`.
pub(crate) fn advance_path_by_one<NS: Deref, NL: Deref, T>(
path: &mut BlindedPath, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1<T>
) -> Result<(), ()>
{
let control_tlvs_ss = node_signer.ecdh(Recipient::Node, &path.blinding_point, None)?;
let rho = onion_utils::gen_rho_from_shared_secret(&control_tlvs_ss.secret_bytes());
- let encrypted_control_tlvs = path.blinded_hops.remove(0).encrypted_payload;
- let mut s = Cursor::new(&encrypted_control_tlvs);
+ let encrypted_control_tlvs = &path.blinded_hops.get(0).ok_or(())?.encrypted_payload;
+ let mut s = Cursor::new(encrypted_control_tlvs);
let mut reader = FixedLengthReader::new(&mut s, encrypted_control_tlvs.len() as u64);
match ChaChaPolyReadAdapter::read(&mut reader, rho) {
Ok(ChaChaPolyReadAdapter {
};
mem::swap(&mut path.blinding_point, &mut new_blinding_point);
path.introduction_node = IntroductionNode::NodeId(next_node_id);
+ path.blinded_hops.remove(0);
Ok(())
},
_ => Err(())
use bitcoin::secp256k1::{self, PublicKey, Secp256k1, SecretKey};
-use crate::blinded_path::BlindedHop;
+use crate::blinded_path::{BlindedHop, BlindedPath, IntroductionNode, NodeIdLookUp};
use crate::blinded_path::utils;
+use crate::crypto::streams::ChaChaPolyReadAdapter;
use crate::io;
+use crate::io::Cursor;
use crate::ln::types::PaymentSecret;
use crate::ln::channel_state::CounterpartyForwardingInfo;
use crate::ln::features::BlindedHopFeatures;
use crate::ln::msgs::DecodeError;
+use crate::ln::onion_utils;
use crate::offers::invoice::BlindedPayInfo;
use crate::offers::invoice_request::InvoiceRequestFields;
use crate::offers::offer::OfferId;
-use crate::util::ser::{HighZeroBytesDroppedBigSize, Readable, Writeable, Writer};
+use crate::sign::{NodeSigner, Recipient};
+use crate::util::ser::{FixedLengthReader, LengthReadableArgs, HighZeroBytesDroppedBigSize, Readable, Writeable, Writer};
+
+use core::mem;
+use core::ops::Deref;
#[allow(unused_imports)]
use crate::prelude::*;
utils::construct_blinded_hops(secp_ctx, pks, tlvs, session_priv)
}
+// Advance the blinded onion payment path by one hop, so make the second hop into the new
+// introduction node.
+//
+// Will only modify `path` when returning `Ok`.
+pub(crate) fn advance_path_by_one<NS: Deref, NL: Deref, T>(
+ path: &mut BlindedPath, node_signer: &NS, node_id_lookup: &NL, secp_ctx: &Secp256k1<T>
+) -> Result<(), ()>
+where
+ NS::Target: NodeSigner,
+ NL::Target: NodeIdLookUp,
+ T: secp256k1::Signing + secp256k1::Verification,
+{
+ let control_tlvs_ss = node_signer.ecdh(Recipient::Node, &path.blinding_point, None)?;
+ let rho = onion_utils::gen_rho_from_shared_secret(&control_tlvs_ss.secret_bytes());
+ let encrypted_control_tlvs = &path.blinded_hops.get(0).ok_or(())?.encrypted_payload;
+ let mut s = Cursor::new(encrypted_control_tlvs);
+ let mut reader = FixedLengthReader::new(&mut s, encrypted_control_tlvs.len() as u64);
+ match ChaChaPolyReadAdapter::read(&mut reader, rho) {
+ Ok(ChaChaPolyReadAdapter {
+ readable: BlindedPaymentTlvs::Forward(ForwardTlvs { short_channel_id, .. })
+ }) => {
+ let next_node_id = match node_id_lookup.next_node_id(short_channel_id) {
+ Some(node_id) => node_id,
+ None => return Err(()),
+ };
+ let mut new_blinding_point = onion_utils::next_hop_pubkey(
+ secp_ctx, path.blinding_point, control_tlvs_ss.as_ref()
+ ).map_err(|_| ())?;
+ mem::swap(&mut path.blinding_point, &mut new_blinding_point);
+ path.introduction_node = IntroductionNode::NodeId(next_node_id);
+ path.blinded_hops.remove(0);
+ Ok(())
+ },
+ _ => Err(())
+ }
+}
+
/// `None` if underflow occurs.
pub(crate) fn amt_to_forward_msat(inbound_amt_msat: u64, payment_relay: &PaymentRelay) -> Option<u64> {
let inbound_amt = inbound_amt_msat as u128;
use crate::sync::{RwLock, RwLockReadGuard, Mutex, MutexGuard};
use core::ops::Deref;
use core::sync::atomic::{AtomicUsize, Ordering};
+use bitcoin::hashes::Hash;
use bitcoin::secp256k1::PublicKey;
/// `Persist` defines behavior for persisting channel monitors: this could mean
{
let err_str = "ChannelMonitor[Update] persistence failed unrecoverably. This indicates we cannot continue normal operation and must shut down.";
let funding_outpoints = hash_set_from_iter(self.monitors.read().unwrap().keys().cloned());
+ let channel_count = funding_outpoints.len();
for funding_outpoint in funding_outpoints.iter() {
let monitor_lock = self.monitors.read().unwrap();
if let Some(monitor_state) = monitor_lock.get(funding_outpoint) {
- if self.update_monitor_with_chain_data(header, txdata, &process, funding_outpoint, &monitor_state).is_err() {
+ if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() {
// Take the monitors lock for writing so that we poison it and any future
// operations going forward fail immediately.
core::mem::drop(monitor_lock);
let monitor_states = self.monitors.write().unwrap();
for (funding_outpoint, monitor_state) in monitor_states.iter() {
if !funding_outpoints.contains(funding_outpoint) {
- if self.update_monitor_with_chain_data(header, txdata, &process, funding_outpoint, &monitor_state).is_err() {
+ if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state, channel_count).is_err() {
log_error!(self.logger, "{}", err_str);
panic!("{}", err_str);
}
}
fn update_monitor_with_chain_data<FN>(
- &self, header: &Header, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint,
- monitor_state: &MonitorHolder<ChannelSigner>
+ &self, header: &Header, best_height: Option<u32>, txdata: &TransactionData, process: FN, funding_outpoint: &OutPoint,
+ monitor_state: &MonitorHolder<ChannelSigner>, channel_count: usize,
) -> Result<(), ()> where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
let monitor = &monitor_state.monitor;
let logger = WithChannelMonitor::from(&self.logger, &monitor, None);
- let mut txn_outputs;
- {
- txn_outputs = process(monitor, txdata);
+
+ let mut txn_outputs = process(monitor, txdata);
+
+ let get_partition_key = |funding_outpoint: &OutPoint| {
+ let funding_txid_hash = funding_outpoint.txid.to_raw_hash();
+ let funding_txid_hash_bytes = funding_txid_hash.as_byte_array();
+ let funding_txid_u32 = u32::from_be_bytes([funding_txid_hash_bytes[0], funding_txid_hash_bytes[1], funding_txid_hash_bytes[2], funding_txid_hash_bytes[3]]);
+ funding_txid_u32.wrapping_add(best_height.unwrap_or_default())
+ };
+
+ let partition_factor = if channel_count < 15 {
+ 5
+ } else {
+			50 // ~8 hours at ~10 minutes per block
+ };
+
+ let has_pending_claims = monitor_state.monitor.has_pending_claims();
+ if has_pending_claims || get_partition_key(funding_outpoint) % partition_factor == 0 {
log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
match self.persister.update_persisted_channel(*funding_outpoint, None, monitor) {
ChannelMonitorUpdateStatus::Completed =>
),
ChannelMonitorUpdateStatus::InProgress => {
log_trace!(logger, "Channel Monitor sync for channel {} in progress.", log_funding_info!(monitor));
- },
+ }
ChannelMonitorUpdateStatus::UnrecoverableError => {
return Err(());
- },
+ }
}
}
#[cfg(test)]
mod tests {
- use crate::check_added_monitors;
+ use crate::{check_added_monitors, check_closed_event};
use crate::{expect_payment_path_successful, get_event_msg};
use crate::{get_htlc_update_msgs, get_revoke_commit_msgs};
use crate::chain::{ChannelMonitorUpdateStatus, Watch};
- use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
+ use crate::chain::channelmonitor::ANTI_REORG_DELAY;
+ use crate::events::{ClosureReason, Event, MessageSendEvent, MessageSendEventsProvider};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::ChannelMessageHandler;
+ const CHAINSYNC_MONITOR_PARTITION_FACTOR: u32 = 5;
+
#[test]
fn test_async_ooo_offchain_updates() {
// Test that if we have multiple offchain updates being persisted and they complete
check_added_monitors!(nodes[0], 1);
}
+ #[test]
+ fn test_chainsync_triggers_distributed_monitor_persistence() {
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // Use FullBlockViaListen to avoid duplicate calls to process_chain_data and skips_blocks() in
+ // case of other connect_styles.
+ *nodes[0].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
+ *nodes[1].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
+ *nodes[2].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
+
+ let _channel_1 = create_announced_chan_between_nodes(&nodes, 0, 1).2;
+ let channel_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 1_000_000, 0).2;
+
+ chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+ chanmon_cfgs[1].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+ chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+
+ connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
+ connect_blocks(&nodes[1], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
+ connect_blocks(&nodes[2], CHAINSYNC_MONITOR_PARTITION_FACTOR * 2);
+
+		// Connecting CHAINSYNC_MONITOR_PARTITION_FACTOR * 2 blocks should trigger only 2 writes
+		// per monitor/channel.
+ assert_eq!(2 * 2, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+ assert_eq!(2, chanmon_cfgs[1].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+ assert_eq!(2, chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+
+ // Test that monitors with pending_claims are persisted on every block.
+ // Now, close channel_2 i.e. b/w node-0 and node-2 to create pending_claim in node[0].
+ nodes[0].node.force_close_broadcasting_latest_txn(&channel_2, &nodes[2].node.get_our_node_id(), "Channel force-closed".to_string()).unwrap();
+ check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false,
+ [nodes[2].node.get_our_node_id()], 1000000);
+ check_closed_broadcast(&nodes[0], 1, true);
+ let close_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+ assert_eq!(close_tx.len(), 1);
+
+ mine_transaction(&nodes[2], &close_tx[0]);
+ check_added_monitors(&nodes[2], 1);
+ check_closed_broadcast(&nodes[2], 1, true);
+ check_closed_event!(&nodes[2], 1, ClosureReason::CommitmentTxConfirmed, false,
+ [nodes[0].node.get_our_node_id()], 1000000);
+
+ chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+ chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+
+ // For channel_2, there should be a monitor write for every block connection.
+		// We connect CHAINSYNC_MONITOR_PARTITION_FACTOR blocks since we don't know when
+		// channel_1 monitor persistence will occur; with CHAINSYNC_MONITOR_PARTITION_FACTOR
+		// blocks it will be persisted exactly once.
+ connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR);
+ connect_blocks(&nodes[2], CHAINSYNC_MONITOR_PARTITION_FACTOR);
+
+		// CHAINSYNC_MONITOR_PARTITION_FACTOR writes for channel_2 due to pending_claim, 1 for
+		// channel_1
+ assert_eq!((CHAINSYNC_MONITOR_PARTITION_FACTOR + 1) as usize, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+ // For node[2], there is no pending_claim
+ assert_eq!(1, chanmon_cfgs[2].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+
+ // Confirm claim for node[0] with ANTI_REORG_DELAY and reset monitor write counter.
+ mine_transaction(&nodes[0], &close_tx[0]);
+ connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+ check_added_monitors(&nodes[0], 1);
+ chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clear();
+
+		// Again connect 1 full cycle of CHAINSYNC_MONITOR_PARTITION_FACTOR blocks; it should
+		// only result in 1 write per monitor/channel.
+ connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR);
+ assert_eq!(2, chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().len());
+ }
+
#[test]
#[cfg(feature = "std")]
fn update_during_chainsync_poisons_channel() {
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1);
+ *nodes[0].connect_style.borrow_mut() = ConnectStyle::FullBlockViaListen;
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::UnrecoverableError);
assert!(std::panic::catch_unwind(|| {
// Returning an UnrecoverableError should always panic immediately
- connect_blocks(&nodes[0], 1);
+			// Connecting CHAINSYNC_MONITOR_PARTITION_FACTOR blocks so that we trigger some
+			// persistence after accounting for block-height based partitioning/distribution.
+ connect_blocks(&nodes[0], CHAINSYNC_MONITOR_PARTITION_FACTOR);
}).is_err());
assert!(std::panic::catch_unwind(|| {
// ...and also poison our locks causing later use to panic as well
);
}
+ /// Returns true if the monitor has pending claim requests that are not fully confirmed yet.
+ pub fn has_pending_claims(&self) -> bool
+ {
+ self.inner.lock().unwrap().onchain_tx_handler.has_pending_claims()
+ }
+
/// Triggers rebroadcasts of pending claims from a force-closed channel after a transaction
/// signature generation failure.
pub fn signer_unblocked<B: Deref, F: Deref, L: Deref>(
}
#[cfg(test)]
- pub fn do_signer_call<F: FnMut(&Signer) -> ()>(&self, mut f: F) {
- let inner = self.inner.lock().unwrap();
- f(&inner.onchain_tx_handler.signer);
+ pub fn do_mut_signer_call<F: FnMut(&mut Signer) -> ()>(&self, mut f: F) {
+ let mut inner = self.inner.lock().unwrap();
+ f(&mut inner.onchain_tx_handler.signer);
}
}
}
}
+ /// Returns true if we are currently tracking any pending claim requests that are not fully
+ /// confirmed yet.
+ pub(super) fn has_pending_claims(&self) -> bool
+ {
+ self.pending_claim_requests.len() != 0
+ }
+
/// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty
/// onchain) lays on the assumption of claim transactions getting confirmed before timelock
/// expiration (CSV or CLTV following cases). In case of high-fee spikes, claim tx may get stuck
/// If the recipient or an intermediate node misbehaves and gives us free money, this may
/// overstate the amount paid, though this is unlikely.
///
+ /// This is only `None` for payments initiated on LDK versions prior to 0.0.103.
+ ///
/// [`Route::get_total_fees`]: crate::routing::router::Route::get_total_fees
fee_paid_msat: Option<u64>,
},
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::ChannelMessageHandler;
use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
+use crate::util::test_channel_signer::SignerOp;
#[test]
fn test_async_commitment_signature_for_funding_created() {
// But! Let's make node[0]'s signer be unavailable: we should *not* broadcast a funding_created
// message...
let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &temporary_channel_id, false);
+ nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &temporary_channel_id, SignerOp::SignCounterpartyCommitment);
nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
check_added_monitors(&nodes[0], 0);
channels[0].channel_id
};
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true);
+ nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
nodes[0].node.signer_unblocked(Some((nodes[1].node.get_our_node_id(), chan_id)));
let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
// Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should
// *not* broadcast a `funding_signed`...
- nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, false);
+ nodes[1].disable_channel_signer_op(&nodes[0].node.get_our_node_id(), &temporary_channel_id, SignerOp::SignCounterpartyCommitment);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors(&nodes[1], 1);
assert_eq!(channels.len(), 1, "expected one channel, not {}", channels.len());
channels[0].channel_id
};
- nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &chan_id, true);
+ nodes[1].enable_channel_signer_op(&nodes[0].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id)));
expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
// Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a
// `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`.
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, false);
+ dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg);
check_added_monitors(dst, 1);
get_event_msg!(dst, MessageSendEvent::SendRevokeAndACK, src.node.get_our_node_id());
// Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`.
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, true);
+ dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));
let events = dst.node.get_and_clear_pending_msg_events();
// Now let's make node[1]'s signer be unavailable while handling the `funding_created`. It should
// *not* broadcast a `funding_signed`...
- nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &temporary_channel_id, false);
+ nodes[1].disable_channel_signer_op(&nodes[0].node.get_our_node_id(), &temporary_channel_id, SignerOp::SignCounterpartyCommitment);
nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
check_added_monitors(&nodes[1], 1);
};
// At this point, we basically expect the channel to open like a normal zero-conf channel.
- nodes[1].set_channel_signer_available(&nodes[0].node.get_our_node_id(), &chan_id, true);
+ nodes[1].enable_channel_signer_op(&nodes[0].node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
nodes[1].node.signer_unblocked(Some((nodes[0].node.get_our_node_id(), chan_id)));
let (funding_signed, channel_ready_1) = {
// Mark dst's signer as unavailable and handle src's commitment_signed: while dst won't yet have a
// `commitment_signed` of its own to offer, it should publish a `revoke_and_ack`.
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, false);
+ dst.disable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
dst.node.handle_commitment_signed(&src.node.get_our_node_id(), &payment_event.commitment_msg);
check_added_monitors(dst, 1);
reconnect_nodes(reconnect_args);
// Mark dst's signer as available and retry: we now expect to see dst's `commitment_signed`.
- dst.set_channel_signer_available(&src.node.get_our_node_id(), &chan_id, true);
+ dst.enable_channel_signer_op(&src.node.get_our_node_id(), &chan_id, SignerOp::SignCounterpartyCommitment);
dst.node.signer_unblocked(Some((src.node.get_our_node_id(), chan_id)));
{
route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
let error_message = "Channel force-closed";
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, false);
if remote_commitment {
// Make the counterparty broadcast its latest commitment.
check_closed_broadcast(&nodes[1], 1, true);
check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100_000);
} else {
+ nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderCommitment);
+ nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderHtlcTransaction);
// We'll connect blocks until the sender has to go onchain to time out the HTLC.
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
// Mark it as available now, we should see the signed commitment transaction.
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true);
+ nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderCommitment);
+ nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderHtlcTransaction);
get_monitor!(nodes[0], chan_id).signer_unblocked(nodes[0].tx_broadcaster, nodes[0].fee_estimator, &nodes[0].logger);
}
// Mark it as unavailable again to now test the HTLC transaction. We'll mine the commitment such
// that the HTLC transaction is retried.
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, false);
+ let sign_htlc_op = if remote_commitment {
+ SignerOp::SignCounterpartyHtlcTransaction
+ } else {
+ SignerOp::SignHolderHtlcTransaction
+ };
+ nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderCommitment);
+ nodes[0].disable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, sign_htlc_op);
mine_transaction(&nodes[0], &commitment_tx);
check_added_monitors(&nodes[0], 1);
if anchors && !remote_commitment {
handle_bump_htlc_event(&nodes[0], 1);
}
- assert!(nodes[0].tx_broadcaster.txn_broadcast().is_empty());
+ let txn = nodes[0].tx_broadcaster.txn_broadcast();
+ assert!(txn.is_empty(), "expected no transaction to be broadcast, got {:?}", txn);
// Mark it as available now, we should see the signed HTLC transaction.
- nodes[0].set_channel_signer_available(&nodes[1].node.get_our_node_id(), &chan_id, true);
+ nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, SignerOp::SignHolderCommitment);
+ nodes[0].enable_channel_signer_op(&nodes[1].node.get_our_node_id(), &chan_id, sign_htlc_op);
get_monitor!(nodes[0], chan_id).signer_unblocked(nodes[0].tx_broadcaster, nodes[0].fee_estimator, &nodes[0].logger);
if anchors && !remote_commitment {
}
#[test]
-fn test_async_holder_signatures() {
+fn test_async_holder_signatures_no_anchors() {
do_test_async_holder_signatures(false, false);
+}
+
+#[test]
+fn test_async_holder_signatures_remote_commitment_no_anchors() {
do_test_async_holder_signatures(false, true);
+}
+
+#[test]
+fn test_async_holder_signatures_anchors() {
do_test_async_holder_signatures(true, false);
+}
+
+#[test]
+fn test_async_holder_signatures_remote_commitment_anchors() {
do_test_async_holder_signatures(true, true);
}
let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
+ // As long as the preimage isn't on-chain, we shouldn't expose the `PaymentClaimed` event to
+ // users nor send the preimage to peers in the new commitment update.
nodes[1].node.claim_funds(payment_preimage_1);
+ assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
check_added_monitors!(nodes[1], 1);
/// Returns the holder signer for this channel.
#[cfg(test)]
- pub fn get_signer(&self) -> &ChannelSignerType<SP> {
- return &self.holder_signer
+ pub fn get_mut_signer(&mut self) -> &mut ChannelSignerType<SP> {
+ return &mut self.holder_signer
}
/// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
}
}
+	/// Performs checks against necessary constraints after receiving either an `accept_channel` or
+	/// `accept_channel2` message, validating the counterparty's parameters against protocol and
+	/// configured handshake limits.
+	///
+	/// On success, records the counterparty's channel parameters and advances `channel_state` to
+	/// reflect that both sides' init messages have been exchanged; any failure returns a
+	/// `ChannelError` that closes the channel.
+ pub fn do_accept_channel_checks(
+ &mut self, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures,
+ common_fields: &msgs::CommonAcceptChannelFields, channel_reserve_satoshis: u64,
+ ) -> Result<(), ChannelError> {
+ let peer_limits = if let Some(ref limits) = self.inbound_handshake_limits_override { limits } else { default_limits };
+
+ // Check sanity of message fields:
+ if !self.is_outbound() {
+ return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned()));
+ }
+ if !matches!(self.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
+ return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned()));
+ }
+ if common_fields.dust_limit_satoshis > 21000000 * 100000000 {
+ return Err(ChannelError::close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", common_fields.dust_limit_satoshis)));
+ }
+ if channel_reserve_satoshis > self.channel_value_satoshis {
+ return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", channel_reserve_satoshis, self.channel_value_satoshis)));
+ }
+ if common_fields.dust_limit_satoshis > self.holder_selected_channel_reserve_satoshis {
+ return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", common_fields.dust_limit_satoshis, self.holder_selected_channel_reserve_satoshis)));
+ }
+ if channel_reserve_satoshis > self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis {
+ return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
+ channel_reserve_satoshis, self.channel_value_satoshis - self.holder_selected_channel_reserve_satoshis)));
+ }
+ let full_channel_value_msat = (self.channel_value_satoshis - channel_reserve_satoshis) * 1000;
+ if common_fields.htlc_minimum_msat >= full_channel_value_msat {
+ return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", common_fields.htlc_minimum_msat, full_channel_value_msat)));
+ }
+ let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
+ if common_fields.to_self_delay > max_delay_acceptable {
+ return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, common_fields.to_self_delay)));
+ }
+ if common_fields.max_accepted_htlcs < 1 {
+ return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
+ }
+ if common_fields.max_accepted_htlcs > MAX_HTLCS {
+ return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", common_fields.max_accepted_htlcs, MAX_HTLCS)));
+ }
+
+ // Now check against optional parameters as set by config...
+ if common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
+ return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
+ }
+ if common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
+ return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
+ }
+ if channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
+ return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
+ }
+ if common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
+ return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
+ }
+ if common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
+ return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
+ }
+ if common_fields.minimum_depth > peer_limits.max_minimum_depth {
+ return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, common_fields.minimum_depth)));
+ }
+
+ if let Some(ty) = &common_fields.channel_type {
+ if *ty != self.channel_type {
+ return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
+ }
+ } else if their_features.supports_channel_type() {
+ // Assume they've accepted the channel type as they said they understand it.
+ } else {
+ let channel_type = ChannelTypeFeatures::from_init(&their_features);
+ if channel_type != ChannelTypeFeatures::only_static_remote_key() {
+ return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
+ }
+ self.channel_type = channel_type.clone();
+ self.channel_transaction_parameters.channel_type_features = channel_type;
+ }
+
+ let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
+ match &common_fields.shutdown_scriptpubkey {
+ &Some(ref script) => {
+ // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
+ if script.len() == 0 {
+ None
+ } else {
+ if !script::is_bolt2_compliant(&script, their_features) {
+ return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
+ }
+ Some(script.clone())
+ }
+ },
+ // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
+ &None => {
+ return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
+ }
+ }
+ } else { None };
+
+ self.counterparty_dust_limit_satoshis = common_fields.dust_limit_satoshis;
+ self.counterparty_max_htlc_value_in_flight_msat = cmp::min(common_fields.max_htlc_value_in_flight_msat, self.channel_value_satoshis * 1000);
+ self.counterparty_selected_channel_reserve_satoshis = Some(channel_reserve_satoshis);
+ self.counterparty_htlc_minimum_msat = common_fields.htlc_minimum_msat;
+ self.counterparty_max_accepted_htlcs = common_fields.max_accepted_htlcs;
+
+ if peer_limits.trust_own_funding_0conf {
+ self.minimum_depth = Some(common_fields.minimum_depth);
+ } else {
+ self.minimum_depth = Some(cmp::max(1, common_fields.minimum_depth));
+ }
+
+ let counterparty_pubkeys = ChannelPublicKeys {
+ funding_pubkey: common_fields.funding_pubkey,
+ revocation_basepoint: RevocationBasepoint::from(common_fields.revocation_basepoint),
+ payment_point: common_fields.payment_basepoint,
+ delayed_payment_basepoint: DelayedPaymentBasepoint::from(common_fields.delayed_payment_basepoint),
+ htlc_basepoint: HtlcBasepoint::from(common_fields.htlc_basepoint)
+ };
+
+ self.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
+ selected_contest_delay: common_fields.to_self_delay,
+ pubkeys: counterparty_pubkeys,
+ });
+
+ self.counterparty_cur_commitment_point = Some(common_fields.first_per_commitment_point);
+ self.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
+
+ self.channel_state = ChannelState::NegotiatingFunding(
+ NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
+ );
+ self.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
+
+ Ok(())
+ }
+
/// Returns the block hash in which our funding transaction was confirmed.
pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
self.funding_tx_confirmed_in
}
// Message handlers
- pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
- let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
-
- // Check sanity of message fields:
- if !self.context.is_outbound() {
- return Err(ChannelError::close("Got an accept_channel message from an inbound peer".to_owned()));
- }
- if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
- return Err(ChannelError::close("Got an accept_channel message at a strange time".to_owned()));
- }
- if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
- return Err(ChannelError::close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
- }
- if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
- return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
- }
- if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
- }
- if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
- return Err(ChannelError::close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
- msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
- }
- let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
- if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
- return Err(ChannelError::close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
- }
- let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
- if msg.common_fields.to_self_delay > max_delay_acceptable {
- return Err(ChannelError::close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
- }
- if msg.common_fields.max_accepted_htlcs < 1 {
- return Err(ChannelError::close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
- }
- if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
- return Err(ChannelError::close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
- }
-
- // Now check against optional parameters as set by config...
- if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
- return Err(ChannelError::close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
- }
- if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
- return Err(ChannelError::close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
- }
- if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
- return Err(ChannelError::close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
- }
- if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
- return Err(ChannelError::close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
- }
- if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
- }
- if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
- return Err(ChannelError::close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
- }
- if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
- return Err(ChannelError::close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
- }
-
- if let Some(ty) = &msg.common_fields.channel_type {
- if *ty != self.context.channel_type {
- return Err(ChannelError::close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
- }
- } else if their_features.supports_channel_type() {
- // Assume they've accepted the channel type as they said they understand it.
- } else {
- let channel_type = ChannelTypeFeatures::from_init(&their_features);
- if channel_type != ChannelTypeFeatures::only_static_remote_key() {
- return Err(ChannelError::close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
- }
- self.context.channel_type = channel_type.clone();
- self.context.channel_transaction_parameters.channel_type_features = channel_type;
- }
-
- let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
- match &msg.common_fields.shutdown_scriptpubkey {
- &Some(ref script) => {
- // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
- if script.len() == 0 {
- None
- } else {
- if !script::is_bolt2_compliant(&script, their_features) {
- return Err(ChannelError::close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
- }
- Some(script.clone())
- }
- },
- // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
- &None => {
- return Err(ChannelError::close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
- }
- }
- } else { None };
-
- self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
- self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
- self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
- self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
- self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
-
- if peer_limits.trust_own_funding_0conf {
- self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
- } else {
- self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
- }
-
- let counterparty_pubkeys = ChannelPublicKeys {
- funding_pubkey: msg.common_fields.funding_pubkey,
- revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
- payment_point: msg.common_fields.payment_basepoint,
- delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
- htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
- };
-
- self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
- selected_contest_delay: msg.common_fields.to_self_delay,
- pubkeys: counterparty_pubkeys,
- });
-
- self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
- self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
-
- self.context.channel_state = ChannelState::NegotiatingFunding(
- NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
- );
- self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
-
- Ok(())
+ pub fn accept_channel(
+ &mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits,
+ their_features: &InitFeatures
+ ) -> Result<(), ChannelError> {
+ self.context.do_accept_channel_checks(default_limits, their_features, &msg.common_fields, msg.channel_reserve_satoshis)
}
/// Handles a funding_signed message from the remote end.
self.pending_outbound_payments
.send_payment_for_bolt12_invoice(
invoice, payment_id, &self.router, self.list_usable_channels(),
- || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer,
- best_block_height, &self.logger, &self.pending_events,
+ || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, &self,
+ &self.secp_ctx, best_block_height, &self.logger, &self.pending_events,
|args| self.send_payment_along_path(args)
)
}
if short_chan_id != 0 {
let mut forwarding_counterparty = None;
macro_rules! forwarding_channel_not_found {
- () => {
- for forward_info in pending_forwards.drain(..) {
+ ($forward_infos: expr) => {
+ for forward_info in $forward_infos {
match forward_info {
HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
let (counterparty_node_id, forward_chan_id) = match chan_info_opt {
Some((cp_id, chan_id)) => (cp_id, chan_id),
None => {
- forwarding_channel_not_found!();
+ forwarding_channel_not_found!(pending_forwards.drain(..));
continue;
}
};
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
if peer_state_mutex_opt.is_none() {
- forwarding_channel_not_found!();
+ forwarding_channel_not_found!(pending_forwards.drain(..));
continue;
}
let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
- let logger = WithChannelContext::from(&self.logger, &chan.context, None);
- for forward_info in pending_forwards.drain(..) {
- let queue_fail_htlc_res = match forward_info {
- HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
- prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
- prev_user_channel_id, forward_info: PendingHTLCInfo {
- incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
- routing: PendingHTLCRouting::Forward {
- onion_packet, blinded, ..
- }, skimmed_fee_msat, ..
+ let mut draining_pending_forwards = pending_forwards.drain(..);
+ while let Some(forward_info) = draining_pending_forwards.next() {
+ let queue_fail_htlc_res = match forward_info {
+ HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
+ prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint,
+ prev_user_channel_id, forward_info: PendingHTLCInfo {
+ incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
+ routing: PendingHTLCRouting::Forward {
+ ref onion_packet, blinded, ..
+ }, skimmed_fee_msat, ..
+ },
+ }) => {
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ user_channel_id: Some(prev_user_channel_id),
+ channel_id: prev_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ // Phantom payments are only PendingHTLCRouting::Receive.
+ phantom_shared_secret: None,
+ blinded_failure: blinded.map(|b| b.failure),
+ });
+ let next_blinding_point = blinded.and_then(|b| {
+ let encrypted_tlvs_ss = self.node_signer.ecdh(
+ Recipient::Node, &b.inbound_blinding_point, None
+ ).unwrap().secret_bytes();
+ onion_utils::next_hop_pubkey(
+ &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
+ ).ok()
+ });
+
+ // Forward the HTLC over the most appropriate channel with the corresponding peer,
+ // applying non-strict forwarding.
+ // The channel with the least amount of outbound liquidity will be used to maximize the
+ // probability of being able to successfully forward a subsequent HTLC.
+ let maybe_optimal_channel = peer_state.channel_by_id.values_mut().filter_map(|phase| match phase {
+ ChannelPhase::Funded(chan) => {
+ let balances = chan.context.get_available_balances(&self.fee_estimator);
+ if outgoing_amt_msat <= balances.next_outbound_htlc_limit_msat &&
+ outgoing_amt_msat >= balances.next_outbound_htlc_minimum_msat &&
+ chan.context.is_usable() {
+ Some((chan, balances))
+ } else {
+ None
+ }
},
- }) => {
- let logger = WithChannelContext::from(&self.logger, &chan.context, Some(payment_hash));
- log_trace!(logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, &payment_hash, short_chan_id);
- let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- user_channel_id: Some(prev_user_channel_id),
- channel_id: prev_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- // Phantom payments are only PendingHTLCRouting::Receive.
- phantom_shared_secret: None,
- blinded_failure: blinded.map(|b| b.failure),
- });
- let next_blinding_point = blinded.and_then(|b| {
- let encrypted_tlvs_ss = self.node_signer.ecdh(
- Recipient::Node, &b.inbound_blinding_point, None
- ).unwrap().secret_bytes();
- onion_utils::next_hop_pubkey(
- &self.secp_ctx, b.inbound_blinding_point, &encrypted_tlvs_ss
- ).ok()
- });
- if let Err(e) = chan.queue_add_htlc(outgoing_amt_msat,
- payment_hash, outgoing_cltv_value, htlc_source.clone(),
- onion_packet, skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
- &&logger)
- {
- if let ChannelError::Ignore(msg) = e {
- log_trace!(logger, "Failed to forward HTLC with payment_hash {}: {}", &payment_hash, msg);
+ _ => None,
+ }).min_by_key(|(_, balances)| balances.next_outbound_htlc_limit_msat).map(|(c, _)| c);
+ let optimal_channel = match maybe_optimal_channel {
+ Some(chan) => chan,
+ None => {
+ // Fall back to the specified channel to return an appropriate error.
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ chan
} else {
- panic!("Stated return value requirements in send_htlc() were not met");
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
}
+ }
+ };
+
+ let logger = WithChannelContext::from(&self.logger, &optimal_channel.context, Some(payment_hash));
+ let channel_description = if optimal_channel.context.get_short_channel_id() == Some(short_chan_id) {
+ "specified"
+ } else {
+ "alternate"
+ };
+ log_trace!(logger, "Forwarding HTLC from SCID {} with payment_hash {} and next hop SCID {} over {} channel {} with corresponding peer {}",
+ prev_short_channel_id, &payment_hash, short_chan_id, channel_description, optimal_channel.context.channel_id(), &counterparty_node_id);
+ if let Err(e) = optimal_channel.queue_add_htlc(outgoing_amt_msat,
+ payment_hash, outgoing_cltv_value, htlc_source.clone(),
+ onion_packet.clone(), skimmed_fee_msat, next_blinding_point, &self.fee_estimator,
+ &&logger)
+ {
+ if let ChannelError::Ignore(msg) = e {
+ log_trace!(logger, "Failed to forward HTLC with payment_hash {} to peer {}: {}", &payment_hash, &counterparty_node_id, msg);
+ } else {
+ panic!("Stated return value requirements in send_htlc() were not met");
+ }
+
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan);
failed_forwards.push((htlc_source, payment_hash,
HTLCFailReason::reason(failure_code, data),
HTLCDestination::NextHopChannel { node_id: Some(chan.context.get_counterparty_node_id()), channel_id: forward_chan_id }
));
- continue;
+ } else {
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
}
- None
- },
- HTLCForwardInfo::AddHTLC { .. } => {
- panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
- },
- HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
+ }
+ None
+ },
+ HTLCForwardInfo::AddHTLC { .. } => {
+ panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
+ },
+ HTLCForwardInfo::FailHTLC { htlc_id, ref err_packet } => {
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
- Some((chan.queue_fail_htlc(htlc_id, err_packet, &&logger), htlc_id))
- },
- HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ Some((chan.queue_fail_htlc(htlc_id, err_packet.clone(), &&logger), htlc_id))
+ } else {
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
+ }
+ },
+ HTLCForwardInfo::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failing malformed HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
let res = chan.queue_fail_malformed_htlc(
htlc_id, failure_code, sha256_of_onion, &&logger
);
Some((res, htlc_id))
- },
- };
- if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
- if let Err(e) = queue_fail_htlc_res {
- if let ChannelError::Ignore(msg) = e {
+ } else {
+ forwarding_channel_not_found!(core::iter::once(forward_info).chain(draining_pending_forwards));
+ break;
+ }
+ },
+ };
+ if let Some((queue_fail_htlc_res, htlc_id)) = queue_fail_htlc_res {
+ if let Err(e) = queue_fail_htlc_res {
+ if let ChannelError::Ignore(msg) = e {
+ if let Some(ChannelPhase::Funded(ref mut chan)) = peer_state.channel_by_id.get_mut(&forward_chan_id) {
+ let logger = WithChannelContext::from(&self.logger, &chan.context, None);
log_trace!(logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
- } else {
- panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
}
- // fail-backs are best-effort, we probably already have one
- // pending, and if not that's OK, if not, the channel is on
- // the chain and sending the HTLC-Timeout is their problem.
- continue;
+ } else {
+ panic!("Stated return value requirements in queue_fail_{{malformed_}}htlc() were not met");
}
+ // fail-backs are best-effort, we probably already have one
+ // pending, and if not that's OK, if not, the channel is on
+ // the chain and sending the HTLC-Timeout is their problem.
+ continue;
}
}
- } else {
- forwarding_channel_not_found!();
- continue;
}
} else {
'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) {
}
if valid_mpp {
for htlc in sources.drain(..) {
- let prev_hop_chan_id = htlc.prev_hop.channel_id;
- if let Err((pk, err)) = self.claim_funds_from_hop(
+ self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
|_, definitely_duplicate| {
debug_assert!(!definitely_duplicate, "We shouldn't claim duplicatively from a payment");
Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash })
}
- ) {
- if let msgs::ErrorAction::IgnoreError = err.err.action {
- // We got a temporary failure updating monitor, but will claim the
- // HTLC when the monitor updating is restored (or on chain).
- let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id), Some(payment_hash));
- log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
- } else { errs.push((pk, err)); }
- }
+ );
}
}
if !valid_mpp {
}
}
- fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(&self,
- prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
- -> Result<(), (PublicKey, MsgHandleErrInternal)> {
+ fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>, bool) -> Option<MonitorUpdateCompletionAction>>(
+ &self, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage,
+ completion_action: ComplFunc,
+ ) {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
// If we haven't yet run background events assume we're still deserializing and shouldn't
let action = if let Some(action) = completion_action(None, true) {
action
} else {
- return Ok(());
+ return;
};
mem::drop(peer_state_lock);
} else {
debug_assert!(false,
"Duplicate claims should always free another channel immediately");
- return Ok(());
+ return;
};
if let Some(peer_state_mtx) = per_peer_state.get(&node_id) {
let mut peer_state = peer_state_mtx.lock().unwrap();
}
}
}
- return Ok(());
+ return;
}
}
}
// generally always allowed to be duplicative (and it's specifically noted in
// `PaymentForwarded`).
self.handle_monitor_update_completion_actions(completion_action(None, false));
- Ok(())
}
fn finalize_claims(&self, sources: Vec<HTLCSource>) {
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
#[cfg(debug_assertions)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
- let res = self.claim_funds_from_hop(hop_data, payment_preimage,
+ self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release =
if let Some(node_id) = next_channel_counterparty_node_id {
})
}
});
- if let Err((pk, err)) = res {
- let result: Result<(), _> = Err(err);
- let _ = handle_error!(self, result, pk);
- }
},
}
}
use crate::util::logger::Logger;
use crate::util::scid_utils;
use crate::util::test_channel_signer::TestChannelSigner;
+#[cfg(test)]
+use crate::util::test_channel_signer::SignerOp;
use crate::util::test_utils;
use crate::util::test_utils::{panicking, TestChainMonitor, TestScorer, TestKeysInterface};
use crate::util::ser::{ReadableArgs, Writeable};
pub fn get_block_header(&self, height: u32) -> Header {
self.blocks.lock().unwrap()[height as usize].0.header
}
- /// Changes the channel signer's availability for the specified peer and channel.
+
+ /// Toggles this node's signer to be available for the given signer operation.
+ /// This is useful for testing behavior when restoring an async signer that previously
+ /// could not return a signature immediately.
+ #[cfg(test)]
+ pub fn enable_channel_signer_op(&self, peer_id: &PublicKey, chan_id: &ChannelId, signer_op: SignerOp) {
+ self.set_channel_signer_ops(peer_id, chan_id, signer_op, true);
+ }
+
+ /// Toggles this node's signer to be unavailable, returning `Err` for the given signer operation.
+ /// This is useful for testing behavior for an async signer that cannot return a signature
+ /// immediately.
+ #[cfg(test)]
+ pub fn disable_channel_signer_op(&self, peer_id: &PublicKey, chan_id: &ChannelId, signer_op: SignerOp) {
+ self.set_channel_signer_ops(peer_id, chan_id, signer_op, false);
+ }
+
+ /// Changes the channel signer's availability for the specified peer, channel, and signer
+ /// operation.
///
- /// When `available` is set to `true`, the channel signer will behave normally. When set to
- /// `false`, the channel signer will act like an off-line remote signer and will return `Err` for
- /// several of the signing methods. Currently, only `get_per_commitment_point` and
- /// `release_commitment_secret` are affected by this setting.
+ /// For the specified signer operation, when `available` is set to `true`, the channel signer
+ /// will behave normally, returning `Ok`. When set to `false`, the channel signer will act
+ /// like an off-line remote signer, returning `Err`. This applies to the signer in all
+ /// relevant places, i.e. the channel manager, chain monitor, and the keys manager.
#[cfg(test)]
- pub fn set_channel_signer_available(&self, peer_id: &PublicKey, chan_id: &ChannelId, available: bool) {
+ fn set_channel_signer_ops(&self, peer_id: &PublicKey, chan_id: &ChannelId, signer_op: SignerOp, available: bool) {
use crate::sign::ChannelSigner;
log_debug!(self.logger, "Setting channel signer for {} as available={}", chan_id, available);
let per_peer_state = self.node.per_peer_state.read().unwrap();
- let chan_lock = per_peer_state.get(peer_id).unwrap().lock().unwrap();
+ let mut chan_lock = per_peer_state.get(peer_id).unwrap().lock().unwrap();
let mut channel_keys_id = None;
- if let Some(chan) = chan_lock.channel_by_id.get(chan_id).map(|phase| phase.context()) {
- chan.get_signer().as_ecdsa().unwrap().set_available(available);
+ if let Some(chan) = chan_lock.channel_by_id.get_mut(chan_id).map(|phase| phase.context_mut()) {
+ let signer = chan.get_mut_signer().as_mut_ecdsa().unwrap();
+ if available {
+ signer.enable_op(signer_op);
+ } else {
+ signer.disable_op(signer_op);
+ }
channel_keys_id = Some(chan.channel_keys_id);
}
- let mut monitor = None;
- for (funding_txo, channel_id) in self.chain_monitor.chain_monitor.list_monitors() {
- if *chan_id == channel_id {
- monitor = self.chain_monitor.chain_monitor.get_monitor(funding_txo).ok();
- }
- }
+ let monitor = self.chain_monitor.chain_monitor.list_monitors().into_iter()
+ .find(|(_, channel_id)| *channel_id == *chan_id)
+ .and_then(|(funding_txo, _)| self.chain_monitor.chain_monitor.get_monitor(funding_txo).ok());
if let Some(monitor) = monitor {
- monitor.do_signer_call(|signer| {
+ monitor.do_mut_signer_call(|signer| {
channel_keys_id = channel_keys_id.or(Some(signer.inner.channel_keys_id()));
- signer.set_available(available)
+ if available {
+ signer.enable_op(signer_op);
+ } else {
+ signer.disable_op(signer_op);
+ }
});
}
+ let channel_keys_id = channel_keys_id.unwrap();
+ let mut unavailable_signers_ops = self.keys_manager.unavailable_signers_ops.lock().unwrap();
+ let entry = unavailable_signers_ops.entry(channel_keys_id).or_insert(new_hash_set());
if available {
- self.keys_manager.unavailable_signers.lock().unwrap()
- .remove(channel_keys_id.as_ref().unwrap());
+ entry.remove(&signer_op);
+ if entry.is_empty() {
+ unavailable_signers_ops.remove(&channel_keys_id);
+ }
} else {
- self.keys_manager.unavailable_signers.lock().unwrap()
- .insert(channel_keys_id.unwrap());
- }
+ entry.insert(signer_op);
+ };
}
}
);
}
-/// Fails creating an offer when a blinded path cannot be created without exposing the node's id.
+/// Checks that an offer can be created using an unannounced node as a blinded path's introduction
+/// node. This is only preferred if there are no other options, which may indicate either that the
+/// offer is intended for the unannounced node or that the node is actually announced (e.g., an
+/// LSP) but the recipient doesn't have a network graph.
#[test]
-fn fails_creating_offer_without_blinded_paths() {
+fn creates_offer_with_blinded_path_using_unannounced_introduction_node() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
- match nodes[0].node.create_offer_builder(None) {
- Ok(_) => panic!("Expected error"),
- Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+ let alice = &nodes[0];
+ let alice_id = alice.node.get_our_node_id();
+ let bob = &nodes[1];
+ let bob_id = bob.node.get_our_node_id();
+
+ let offer = alice.node
+ .create_offer_builder(None).unwrap()
+ .amount_msats(10_000_000)
+ .build().unwrap();
+ assert_ne!(offer.signing_pubkey(), Some(alice_id));
+ assert!(!offer.paths().is_empty());
+ for path in offer.paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
}
+
+ let payment_id = PaymentId([1; 32]);
+ bob.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None).unwrap();
+ expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id);
+
+ let onion_message = bob.onion_messenger.next_onion_message_for_peer(alice_id).unwrap();
+ alice.onion_messenger.handle_onion_message(&bob_id, &onion_message);
+
+ let (invoice_request, reply_path) = extract_invoice_request(alice, &onion_message);
+ let payment_context = PaymentContext::Bolt12Offer(Bolt12OfferContext {
+ offer_id: offer.id(),
+ invoice_request: InvoiceRequestFields {
+ payer_id: invoice_request.payer_id(),
+ quantity: None,
+ payer_note_truncated: None,
+ },
+ });
+ assert_ne!(invoice_request.payer_id(), bob_id);
+ assert_eq!(reply_path.introduction_node, IntroductionNode::NodeId(alice_id));
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+ bob.onion_messenger.handle_onion_message(&alice_id, &onion_message);
+
+ let invoice = extract_invoice(bob, &onion_message);
+ assert_ne!(invoice.signing_pubkey(), alice_id);
+ assert!(!invoice.payment_paths().is_empty());
+ for (_, path) in invoice.payment_paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
+ }
+
+ route_bolt12_payment(bob, &[alice], &invoice);
+ expect_recent_payment!(bob, RecentPaymentDetails::Pending, payment_id);
+
+ claim_bolt12_payment(bob, &[alice], payment_context);
+ expect_recent_payment!(bob, RecentPaymentDetails::Fulfilled, payment_id);
}
-/// Fails creating a refund when a blinded path cannot be created without exposing the node's id.
+/// Checks that a refund can be created using an unannounced node as a blinded path's introduction
+/// node. This is only preferred if there are no other options, which may indicate either that the
+/// refund is intended for the unannounced node or that the node is actually announced (e.g., an
+/// LSP) but the sender doesn't have a network graph.
#[test]
-fn fails_creating_refund_without_blinded_paths() {
+fn creates_refund_with_blinded_path_using_unannounced_introduction_node() {
let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
create_unannounced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+ let alice = &nodes[0];
+ let alice_id = alice.node.get_our_node_id();
+ let bob = &nodes[1];
+ let bob_id = bob.node.get_our_node_id();
+
let absolute_expiry = Duration::from_secs(u64::MAX);
let payment_id = PaymentId([1; 32]);
-
- match nodes[0].node.create_refund_builder(
- 10_000, absolute_expiry, payment_id, Retry::Attempts(0), None
- ) {
- Ok(_) => panic!("Expected error"),
- Err(e) => assert_eq!(e, Bolt12SemanticError::MissingPaths),
+ let refund = bob.node
+ .create_refund_builder(10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None)
+ .unwrap()
+ .build().unwrap();
+ assert_ne!(refund.payer_id(), bob_id);
+ assert!(!refund.paths().is_empty());
+ for path in refund.paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(alice_id));
}
+ expect_recent_payment!(bob, RecentPaymentDetails::AwaitingInvoice, payment_id);
- assert!(nodes[0].node.list_recent_payments().is_empty());
+ let expected_invoice = alice.node.request_refund_payment(&refund).unwrap();
+
+ let onion_message = alice.onion_messenger.next_onion_message_for_peer(bob_id).unwrap();
+
+ let invoice = extract_invoice(bob, &onion_message);
+ assert_eq!(invoice, expected_invoice);
+ assert_ne!(invoice.signing_pubkey(), alice_id);
+ assert!(!invoice.payment_paths().is_empty());
+ for (_, path) in invoice.payment_paths() {
+ assert_eq!(path.introduction_node, IntroductionNode::NodeId(bob_id));
+ }
}
/// Fails creating or paying an offer when a blinded path cannot be created because no peers are
}
}
-/// Fails creating an invoice request when a blinded reply path cannot be created without exposing
-/// the node's id.
+/// Fails creating an invoice request when a blinded reply path cannot be created.
#[test]
fn fails_creating_invoice_request_without_blinded_reply_path() {
let chanmon_cfgs = create_chanmon_cfgs(6);
let (alice, bob, charlie, david) = (&nodes[0], &nodes[1], &nodes[2], &nodes[3]);
disconnect_peers(alice, &[charlie, david, &nodes[4], &nodes[5]]);
- disconnect_peers(david, &[bob, &nodes[4], &nodes[5]]);
+ disconnect_peers(david, &[bob, charlie, &nodes[4], &nodes[5]]);
let offer = alice.node
.create_offer_builder(None).unwrap()
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::secp256k1::{self, Secp256k1, SecretKey};
-use crate::sign::{EntropySource, NodeSigner, Recipient};
+use crate::blinded_path::{IntroductionNode, NodeIdLookUp};
+use crate::blinded_path::payment::advance_path_by_one;
use crate::events::{self, PaymentFailureReason};
use crate::ln::types::{PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::channel_state::ChannelDetails;
use crate::ln::onion_utils::{DecodedOnionFailure, HTLCFailReason};
use crate::offers::invoice::Bolt12Invoice;
use crate::routing::router::{BlindedTail, InFlightHtlcs, Path, PaymentParameters, Route, RouteParameters, Router};
+use crate::sign::{EntropySource, NodeSigner, Recipient};
use crate::util::errors::APIError;
use crate::util::logger::Logger;
use crate::util::time::Time;
keysend_preimage: Option<PaymentPreimage>,
custom_tlvs: Vec<(u64, Vec<u8>)>,
pending_amt_msat: u64,
- /// Used to track the fee paid. Only present if the payment was serialized on 0.0.103+.
+ /// Used to track the fee paid. Present iff the payment was serialized on 0.0.103+.
pending_fee_msat: Option<u64>,
/// The total payment amount across all paths, used to verify that a retry is not overpaying.
total_msat: u64,
}
}
- pub(super) fn send_payment_for_bolt12_invoice<R: Deref, ES: Deref, NS: Deref, IH, SP, L: Deref>(
+ pub(super) fn send_payment_for_bolt12_invoice<
+ R: Deref, ES: Deref, NS: Deref, NL: Deref, IH, SP, L: Deref
+ >(
&self, invoice: &Bolt12Invoice, payment_id: PaymentId, router: &R,
first_hops: Vec<ChannelDetails>, inflight_htlcs: IH, entropy_source: &ES, node_signer: &NS,
- best_block_height: u32, logger: &L,
+ node_id_lookup: &NL, secp_ctx: &Secp256k1<secp256k1::All>, best_block_height: u32,
+ logger: &L,
pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
send_payment_along_path: SP,
) -> Result<(), Bolt12PaymentError>
R::Target: Router,
ES::Target: EntropySource,
NS::Target: NodeSigner,
+ NL::Target: NodeIdLookUp,
L::Target: Logger,
IH: Fn() -> InFlightHtlcs,
SP: Fn(SendAlongPathArgs) -> Result<(), APIError>,
hash_map::Entry::Vacant(_) => return Err(Bolt12PaymentError::UnexpectedInvoice),
};
- let pay_params = PaymentParameters::from_bolt12_invoice(&invoice);
+ let mut payment_params = PaymentParameters::from_bolt12_invoice(&invoice);
+
+ // Advance any blinded path where the introduction node is our node.
+ if let Ok(our_node_id) = node_signer.get_node_id(Recipient::Node) {
+ for (_, path) in payment_params.payee.blinded_route_hints_mut().iter_mut() {
+ let introduction_node_id = match path.introduction_node {
+ IntroductionNode::NodeId(pubkey) => pubkey,
+ IntroductionNode::DirectedShortChannelId(direction, scid) => {
+ match node_id_lookup.next_node_id(scid) {
+ Some(next_node_id) => *direction.select_pubkey(&our_node_id, &next_node_id),
+ None => continue,
+ }
+ },
+ };
+ if introduction_node_id == our_node_id {
+ let _ = advance_path_by_one(path, node_signer, node_id_lookup, secp_ctx);
+ }
+ }
+ }
+
let amount_msat = invoice.amount_msats();
- let mut route_params = RouteParameters::from_payment_params_and_value(pay_params, amount_msat);
+ let mut route_params = RouteParameters::from_payment_params_and_value(
+ payment_params, amount_msat
+ );
if let Some(max_fee_msat) = max_total_routing_fee_msat {
route_params.max_total_routing_fee_msat = Some(max_fee_msat);
}
use core::time::Duration;
+ use crate::blinded_path::EmptyNodeIdLookUp;
use crate::events::{Event, PathFailure, PaymentFailureReason};
use crate::ln::types::PaymentHash;
use crate::ln::channelmanager::{PaymentId, RecipientOnionFields};
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
+ let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let pending_events = Mutex::new(VecDeque::new());
assert_eq!(
outbound_payments.send_payment_for_bolt12_invoice(
&invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
- &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+ &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events,
+ |_| panic!()
),
Ok(()),
);
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
+ let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let pending_events = Mutex::new(VecDeque::new());
assert_eq!(
outbound_payments.send_payment_for_bolt12_invoice(
&invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
- &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+ &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events,
+ |_| panic!()
),
Ok(()),
);
let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, &logger));
let scorer = RwLock::new(test_utils::TestScorer::new());
let router = test_utils::TestRouter::new(network_graph, &logger, &scorer);
+ let secp_ctx = Secp256k1::new();
let keys_manager = test_utils::TestKeysInterface::new(&[0; 32], Network::Testnet);
let pending_events = Mutex::new(VecDeque::new());
assert_eq!(
outbound_payments.send_payment_for_bolt12_invoice(
&invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
- &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+ &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events,
+ |_| panic!()
),
Err(Bolt12PaymentError::UnexpectedInvoice),
);
assert_eq!(
outbound_payments.send_payment_for_bolt12_invoice(
&invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
- &&keys_manager, 0, &&logger, &pending_events, |_| Ok(())
+ &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events,
+ |_| Ok(())
),
Ok(()),
);
assert_eq!(
outbound_payments.send_payment_for_bolt12_invoice(
&invoice, payment_id, &&router, vec![], || InFlightHtlcs::new(), &&keys_manager,
- &&keys_manager, 0, &&logger, &pending_events, |_| panic!()
+ &&keys_manager, &EmptyNodeIdLookUp {}, &secp_ctx, 0, &&logger, &pending_events,
+ |_| panic!()
),
Err(Bolt12PaymentError::DuplicateInvoice),
);
fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) {
let chanmon_cfgs = create_chanmon_cfgs(3);
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let max_in_flight_percent = 10;
let mut intercept_forwards_config = test_default_channel_config();
intercept_forwards_config.accept_intercept_htlcs = true;
+ intercept_forwards_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent;
let mut underpay_config = test_default_channel_config();
underpay_config.channel_config.accept_underpaying_htlcs = true;
+ underpay_config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = max_in_flight_percent;
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, Some(intercept_forwards_config), Some(underpay_config)]);
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let amt_msat = 900_000;
+
let mut chan_ids = Vec::new();
for _ in 0..num_mpp_parts {
- let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000, 0);
- let channel_id = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, 2_000_000, 0).0.channel_id;
+ // We choose the channel size so that there can be at most one part pending on each channel.
+ let channel_size = amt_msat / 1000 / num_mpp_parts as u64 * 100 / max_in_flight_percent as u64 + 100;
+ let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_size, 0);
+ let channel_id = create_unannounced_chan_between_nodes_with_value(&nodes, 1, 2, channel_size, 0).0.channel_id;
chan_ids.push(channel_id);
}
// Send the initial payment.
- let amt_msat = 900_000;
let skimmed_fee_msat = 20;
let mut route_hints = Vec::new();
for _ in 0..num_mpp_parts {
// Create a new channel between C and D as A will refuse to retry on the existing one because
// it just failed.
- let chan_id_cd_2 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).2;
+ create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
// Now retry the failed HTLC.
nodes[0].node.process_pending_htlc_forwards();
expect_pending_htlcs_forwardable!(nodes[2]);
check_added_monitors(&nodes[2], 1);
let cs_forward = SendEvent::from_node(&nodes[2]);
+ let cd_channel_used = cs_forward.msgs[0].channel_id;
nodes[3].node.handle_update_add_htlc(&nodes[2].node.get_our_node_id(), &cs_forward.msgs[0]);
commitment_signed_dance!(nodes[3], nodes[2], cs_forward.commitment_msg, false, true);
nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &ds_fail.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[2], nodes[3], ds_fail.commitment_signed, false, true);
expect_pending_htlcs_forwardable_conditions(nodes[2].node.get_and_clear_pending_events(),
- &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_id_cd_2 }]);
+ &[HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: cd_channel_used }]);
} else {
expect_pending_htlcs_forwardable!(nodes[3]);
expect_payment_claimable!(nodes[3], payment_hash, payment_secret, amt_msat);
_ => panic!()
}
}
+
+#[test]
+fn test_non_strict_forwarding() {
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let mut config = test_default_channel_config();
+ config.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 100;
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[Some(config), Some(config), Some(config)]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // Create a routing node with two outbound channels, each of which can forward 2 payments of
+ // the given value.
+ let payment_value = 1_500_000;
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0);
+ let (chan_update_1, _, channel_id_1, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 4_950, 0);
+ let (chan_update_2, _, channel_id_2, _) = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 5_000, 0);
+
+ // Create a route once.
+ let payment_params = PaymentParameters::from_node_id(nodes[2].node.get_our_node_id(), TEST_FINAL_CLTV)
+ .with_bolt11_features(nodes[2].node.bolt11_invoice_features()).unwrap();
+ let route_params = RouteParameters::from_payment_params_and_value(payment_params, payment_value);
+ let route = functional_test_utils::get_route(&nodes[0], &route_params).unwrap();
+
+ // Send 4 payments over the same route.
+ for i in 0..4 {
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(payment_value), None);
+ nodes[0].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ let mut send_event = SendEvent::from_event(msg_events.remove(0));
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ send_event = SendEvent::from_event(msg_events.remove(0));
+ // The HTLC will be forwarded over the most appropriate channel with the corresponding peer,
+ // applying non-strict forwarding.
+ // The channel with the least amount of outbound liquidity will be used to maximize the
+ // probability of being able to successfully forward a subsequent HTLC.
+ assert_eq!(send_event.msgs[0].channel_id, if i < 2 {
+ channel_id_1
+ } else {
+ channel_id_2
+ });
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]);
+ commitment_signed_dance!(nodes[2], nodes[1], &send_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[2]);
+ let events = nodes[2].node.get_and_clear_pending_events();
+ assert_eq!(events.len(), 1);
+ assert!(matches!(events[0], Event::PaymentClaimable { .. }));
+
+ claim_payment_along_route(
+ ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage)
+ );
+ }
+
+ // Send a 5th payment which will fail.
+ let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(payment_value), None);
+ nodes[0].node.send_payment_with_route(&route, payment_hash,
+ RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
+ check_added_monitors!(nodes[0], 1);
+ let mut msg_events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(msg_events.len(), 1);
+ let mut send_event = SendEvent::from_event(msg_events.remove(0));
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
+ commitment_signed_dance!(nodes[1], nodes[0], &send_event.commitment_msg, false);
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+ let routed_scid = route.paths[0].hops[1].short_channel_id;
+ let routed_channel_id = match routed_scid {
+ scid if scid == chan_update_1.contents.short_channel_id => channel_id_1,
+ scid if scid == chan_update_2.contents.short_channel_id => channel_id_2,
+ _ => panic!("Unexpected short channel id in route"),
+ };
+ // The failure to forward will refer to the channel given in the onion.
+ expect_pending_htlcs_forwardable_conditions(nodes[1].node.get_and_clear_pending_events(),
+ &[HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: routed_channel_id }]);
+
+ let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false);
+ let events = nodes[0].node.get_and_clear_pending_events();
+ expect_payment_failed_conditions_event(events, payment_hash, false, PaymentFailedConditions::new().blamed_scid(routed_scid));
+}
let path_id = Some([2; 32]);
let reply_path = BlindedPath::new_for_message(&[], bob.node_id, &*bob.entropy_source, &secp_ctx).unwrap();
- // Alice tries to asynchronously respond to Bob, but fails because the nodes are unannounced.
- // Therefore, the reply_path cannot be used for the response.
+ // Alice tries to asynchronously respond to Bob, but fails because the nodes are unannounced and
+ // disconnected. Thus, a reply path could not be created for the response.
+ disconnect_peers(alice, bob);
let responder = Responder::new(reply_path, path_id);
alice.custom_message_handler.expect_message_and_response(message.clone());
let response_instruction = alice.custom_message_handler.handle_custom_message(message, Some(responder));
}
fn create_blinded_paths_from_iter<
- I: Iterator<Item = ForwardNode>,
+ I: ExactSizeIterator<Item = ForwardNode>,
T: secp256k1::Signing + secp256k1::Verification
>(
&self, recipient: PublicKey, peers: I, secp_ctx: &Secp256k1<T>, compact_paths: bool
let is_recipient_announced =
network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient));
+ let has_one_peer = peers.len() == 1;
let mut peer_info = peers
- // Limit to peers with announced channels
+ // Limit to peers with announced channels unless the recipient is unannounced.
.filter_map(|peer|
network_graph
.node(&NodeId::from_pubkey(&peer.node_id))
- .filter(|info| info.channels.len() >= MIN_PEER_CHANNELS)
+ .filter(|info|
+ !is_recipient_announced || info.channels.len() >= MIN_PEER_CHANNELS
+ )
.map(|info| (peer, info.is_tor_only(), info.channels.len()))
+ // Allow messages directly with the only peer when unannounced.
+ .or_else(|| (!is_recipient_announced && has_one_peer)
+ .then(|| (peer, false, 0))
+ )
)
// Exclude Tor-only nodes when the recipient is announced.
.filter(|(_, is_tor_only, _)| !(*is_tor_only && is_recipient_announced))
// recipient's node_id.
const MIN_PEER_CHANNELS: usize = 3;
+ let has_one_peer = first_hops
+ .first()
+ .map(|details| details.counterparty.node_id)
+ .map(|node_id| first_hops
+ .iter()
+ .skip(1)
+ .all(|details| details.counterparty.node_id == node_id)
+ )
+ .unwrap_or(false);
+
let network_graph = self.network_graph.deref().read_only();
+ let is_recipient_announced =
+ network_graph.nodes().contains_key(&NodeId::from_pubkey(&recipient));
+
let paths = first_hops.into_iter()
.filter(|details| details.counterparty.features.supports_route_blinding())
.filter(|details| amount_msats <= details.inbound_capacity_msat)
.filter(|details| amount_msats >= details.inbound_htlc_minimum_msat.unwrap_or(0))
.filter(|details| amount_msats <= details.inbound_htlc_maximum_msat.unwrap_or(u64::MAX))
+ // Limit to peers with announced channels unless the recipient is unannounced.
.filter(|details| network_graph
.node(&NodeId::from_pubkey(&details.counterparty.node_id))
- .map(|node_info| node_info.channels.len() >= MIN_PEER_CHANNELS)
- .unwrap_or(false)
+ .map(|node| !is_recipient_announced || node.channels.len() >= MIN_PEER_CHANNELS)
+ // Allow payments directly with the only peer when unannounced.
+ .unwrap_or(!is_recipient_announced && has_one_peer)
)
.filter_map(|details| {
let short_channel_id = match details.get_inbound_payment_scid() {
}
}
+ pub(crate) fn blinded_route_hints_mut(&mut self) -> &mut [(BlindedPayInfo, BlindedPath)] {
+ match self {
+ Self::Blinded { route_hints, .. } => &mut route_hints[..],
+ Self::Clear { .. } => &mut []
+ }
+ }
+
fn unblinded_route_hints(&self) -> &[RouteHint] {
match self {
Self::Blinded { .. } => &[],
#[allow(unused_imports)]
use crate::prelude::*;
-use core::cmp;
+use core::{cmp, fmt};
use crate::sync::{Mutex, Arc};
#[cfg(test)] use crate::sync::MutexGuard;
/// Channel state used for policy enforcement
pub state: Arc<Mutex<EnforcementState>>,
pub disable_revocation_policy_check: bool,
- /// When `true` (the default), the signer will respond immediately with signatures. When `false`,
- /// the signer will return an error indicating that it is unavailable.
- pub available: Arc<Mutex<bool>>,
+ /// Set of signer operations that are disabled. If an operation is disabled,
+ /// the signer will return `Err` when the corresponding method is called.
+ pub disabled_signer_ops: Arc<Mutex<HashSet<SignerOp>>>,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum SignerOp {
+ GetPerCommitmentPoint,
+ ReleaseCommitmentSecret,
+ ValidateHolderCommitment,
+ SignCounterpartyCommitment,
+ ValidateCounterpartyRevocation,
+ SignHolderCommitment,
+ SignJusticeRevokedOutput,
+ SignJusticeRevokedHtlc,
+ SignHolderHtlcTransaction,
+ SignCounterpartyHtlcTransaction,
+ SignClosingTransaction,
+ SignHolderAnchorInput,
+ SignChannelAnnouncementWithFundingKey,
+}
+
+impl fmt::Display for SignerOp {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self {
+ SignerOp::GetPerCommitmentPoint => write!(f, "get_per_commitment_point"),
+ SignerOp::ReleaseCommitmentSecret => write!(f, "release_commitment_secret"),
+ SignerOp::ValidateHolderCommitment => write!(f, "validate_holder_commitment"),
+ SignerOp::SignCounterpartyCommitment => write!(f, "sign_counterparty_commitment"),
+ SignerOp::ValidateCounterpartyRevocation => write!(f, "validate_counterparty_revocation"),
+ SignerOp::SignHolderCommitment => write!(f, "sign_holder_commitment"),
+ SignerOp::SignJusticeRevokedOutput => write!(f, "sign_justice_revoked_output"),
+ SignerOp::SignJusticeRevokedHtlc => write!(f, "sign_justice_revoked_htlc"),
+ SignerOp::SignHolderHtlcTransaction => write!(f, "sign_holder_htlc_transaction"),
+ SignerOp::SignCounterpartyHtlcTransaction => write!(f, "sign_counterparty_htlc_transaction"),
+ SignerOp::SignClosingTransaction => write!(f, "sign_closing_transaction"),
+ SignerOp::SignHolderAnchorInput => write!(f, "sign_holder_anchor_input"),
+ SignerOp::SignChannelAnnouncementWithFundingKey => write!(f, "sign_channel_announcement_with_funding_key"),
+ }
+ }
}
impl PartialEq for TestChannelSigner {
inner,
state,
disable_revocation_policy_check: false,
- available: Arc::new(Mutex::new(true)),
+ disabled_signer_ops: Arc::new(Mutex::new(new_hash_set())),
}
}
inner,
state,
disable_revocation_policy_check,
- available: Arc::new(Mutex::new(true)),
+ disabled_signer_ops: Arc::new(Mutex::new(new_hash_set())),
}
}
self.state.lock().unwrap()
}
- /// Marks the signer's availability.
- ///
- /// When `true`, methods are forwarded to the underlying signer as normal. When `false`, some
- /// methods will return `Err` indicating that the signer is unavailable. Intended to be used for
- /// testing asynchronous signing.
- pub fn set_available(&self, available: bool) {
- *self.available.lock().unwrap() = available;
+ pub fn enable_op(&mut self, signer_op: SignerOp) {
+ self.disabled_signer_ops.lock().unwrap().remove(&signer_op);
+ }
+
+ pub fn disable_op(&mut self, signer_op: SignerOp) {
+ self.disabled_signer_ops.lock().unwrap().insert(signer_op);
+ }
+
+ fn is_signer_available(&self, signer_op: SignerOp) -> bool {
+ !self.disabled_signer_ops.lock().unwrap().contains(&signer_op)
}
}
}
fn validate_counterparty_revocation(&self, idx: u64, _secret: &SecretKey) -> Result<(), ()> {
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::ValidateCounterpartyRevocation) {
return Err(());
}
let mut state = self.state.lock().unwrap();
self.verify_counterparty_commitment_tx(commitment_tx, secp_ctx);
{
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignCounterpartyCommitment) {
return Err(());
}
let mut state = self.state.lock().unwrap();
}
fn sign_holder_commitment(&self, commitment_tx: &HolderCommitmentTransaction, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignHolderCommitment) {
return Err(());
}
let trusted_tx = self.verify_holder_commitment_tx(commitment_tx, secp_ctx);
}
fn sign_justice_revoked_output(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignJusticeRevokedOutput) {
return Err(());
}
Ok(EcdsaChannelSigner::sign_justice_revoked_output(&self.inner, justice_tx, input, amount, per_commitment_key, secp_ctx).unwrap())
}
fn sign_justice_revoked_htlc(&self, justice_tx: &Transaction, input: usize, amount: u64, per_commitment_key: &SecretKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignJusticeRevokedHtlc) {
return Err(());
}
Ok(EcdsaChannelSigner::sign_justice_revoked_htlc(&self.inner, justice_tx, input, amount, per_commitment_key, htlc, secp_ctx).unwrap())
&self, htlc_tx: &Transaction, input: usize, htlc_descriptor: &HTLCDescriptor,
secp_ctx: &Secp256k1<secp256k1::All>
) -> Result<Signature, ()> {
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignHolderHtlcTransaction) {
return Err(());
}
let state = self.state.lock().unwrap();
}
fn sign_counterparty_htlc_transaction(&self, htlc_tx: &Transaction, input: usize, amount: u64, per_commitment_point: &PublicKey, htlc: &HTLCOutputInCommitment, secp_ctx: &Secp256k1<secp256k1::All>) -> Result<Signature, ()> {
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignCounterpartyHtlcTransaction) {
return Err(());
}
Ok(EcdsaChannelSigner::sign_counterparty_htlc_transaction(&self.inner, htlc_tx, input, amount, per_commitment_point, htlc, secp_ctx).unwrap())
// As long as our minimum dust limit is enforced and is greater than our anchor output
// value, an anchor output can only have an index within [0, 1].
assert!(anchor_tx.input[input].previous_output.vout == 0 || anchor_tx.input[input].previous_output.vout == 1);
- if !*self.available.lock().unwrap() {
+ if !self.is_signer_available(SignerOp::SignHolderAnchorInput) {
return Err(());
}
EcdsaChannelSigner::sign_holder_anchor_input(&self.inner, anchor_tx, input, secp_ctx)
use bitcoin::psbt::Psbt;
use bitcoin::Sequence;
+use super::test_channel_signer::SignerOp;
+
pub fn pubkey(byte: u8) -> PublicKey {
let secp_ctx = Secp256k1::new();
PublicKey::from_secret_key(&secp_ctx, &privkey(byte))
///
/// [`ChannelMonitor`]: channelmonitor::ChannelMonitor
pub offchain_monitor_updates: Mutex<HashMap<OutPoint, HashSet<u64>>>,
+ /// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
+ /// monitor's funding outpoint here.
+ pub chain_sync_monitor_persistences: Mutex<VecDeque<OutPoint>>
}
impl TestPersister {
pub fn new() -> Self {
Self {
update_rets: Mutex::new(VecDeque::new()),
offchain_monitor_updates: Mutex::new(new_hash_map()),
+ chain_sync_monitor_persistences: Mutex::new(VecDeque::new())
}
}
ret = update_ret;
}
- if let Some(update) = update {
+ if let Some(update) = update {
self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert(new_hash_set()).insert(update.update_id);
+ } else {
+ self.chain_sync_monitor_persistences.lock().unwrap().push_back(funding_txo);
}
ret
}
fn archive_persisted_channel(&self, funding_txo: OutPoint) {
- // remove the channel from the offchain_monitor_updates map
+ // remove the channel from the offchain_monitor_updates and chain_sync_monitor_persistences.
self.offchain_monitor_updates.lock().unwrap().remove(&funding_txo);
+ self.chain_sync_monitor_persistences.lock().unwrap().retain(|x| x != &funding_txo);
}
}
enforcement_states: Mutex<HashMap<[u8;32], Arc<Mutex<EnforcementState>>>>,
expectations: Mutex<Option<VecDeque<OnGetShutdownScriptpubkey>>>,
pub unavailable_signers: Mutex<HashSet<[u8; 32]>>,
+ pub unavailable_signers_ops: Mutex<HashMap<[u8; 32], HashSet<SignerOp>>>,
}
impl EntropySource for TestKeysInterface {
fn derive_channel_signer(&self, channel_value_satoshis: u64, channel_keys_id: [u8; 32]) -> TestChannelSigner {
let keys = self.backing.derive_channel_signer(channel_value_satoshis, channel_keys_id);
let state = self.make_enforcement_state_cell(keys.commitment_seed);
- let signer = TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check);
- if self.unavailable_signers.lock().unwrap().contains(&channel_keys_id) {
- signer.set_available(false);
+ let mut signer = TestChannelSigner::new_with_revoked(keys, state, self.disable_revocation_policy_check);
+ if let Some(ops) = self.unavailable_signers_ops.lock().unwrap().get(&channel_keys_id) {
+ for &op in ops {
+ signer.disable_op(op);
+ }
}
signer
}
enforcement_states: Mutex::new(new_hash_map()),
expectations: Mutex::new(None),
unavailable_signers: Mutex::new(new_hash_set()),
+ unavailable_signers_ops: Mutex::new(new_hash_map()),
}
}
-./bench/benches/bench.rs
./lightning-background-processor/src/lib.rs
./lightning-block-sync/src/convert.rs
./lightning-block-sync/src/gossip.rs
./lightning-persister/src/lib.rs
./lightning-persister/src/test_utils.rs
./lightning-persister/src/utils.rs
-./lightning-rapid-gossip-sync/src/error.rs
-./lightning-rapid-gossip-sync/src/lib.rs
-./lightning-rapid-gossip-sync/src/processing.rs
./lightning/src/blinded_path/message.rs
./lightning/src/blinded_path/mod.rs
./lightning/src/blinded_path/payment.rs