echo -e "\n\nTest cfg-flag builds"
RUSTFLAGS="--cfg=taproot" cargo test --verbose --color always -p lightning
+[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
RUSTFLAGS="--cfg=async_signing" cargo test --verbose --color always -p lightning
+[ "$CI_MINIMIZE_DISK_USAGE" != "" ] && cargo clean
RUSTFLAGS="--cfg=dual_funding" cargo test --verbose --color always -p lightning
[features]
default = ["std"]
no-std = ["lightning/no-std"]
-std = ["bitcoin/std", "num-traits/std", "lightning/std", "bech32/std"]
+std = ["bitcoin/std", "lightning/std", "bech32/std"]
[dependencies]
bech32 = { version = "0.9.0", default-features = false }
lightning = { version = "0.0.121", path = "../lightning", default-features = false }
secp256k1 = { version = "0.27.0", default-features = false, features = ["recovery", "alloc"] }
-num-traits = { version = "0.2.8", default-features = false }
serde = { version = "1.0.118", optional = true }
bitcoin = { version = "0.30.2", default-features = false }
use lightning::routing::gossip::RoutingFees;
use lightning::routing::router::{RouteHint, RouteHintHop};
-use num_traits::{CheckedAdd, CheckedMul};
-
use secp256k1::ecdsa::{RecoveryId, RecoverableSignature};
use secp256k1::PublicKey;
if b32.len() != 7 {
return Err(Bolt11ParseError::InvalidSliceLength("PositiveTimestamp::from_base32()".into()));
}
- let timestamp: u64 = parse_int_be(b32, 32)
+ let timestamp: u64 = parse_u64_be(b32)
.expect("7*5bit < 64bit, no overflow possible");
match PositiveTimestamp::from_unix_timestamp(timestamp) {
Ok(t) => Ok(t),
}
}
-pub(crate) fn parse_int_be<T, U>(digits: &[U], base: T) -> Option<T>
- where T: CheckedAdd + CheckedMul + From<u8> + Default,
- U: Into<u8> + Copy
-{
- digits.iter().fold(Some(Default::default()), |acc, b|
- acc
- .and_then(|x| x.checked_mul(&base))
- .and_then(|x| x.checked_add(&(Into::<u8>::into(*b)).into()))
- )
-}
+macro_rules! define_parse_int_be { ($name: ident, $ty: ty) => {
+ fn $name(digits: &[u5]) -> Option<$ty> {
+ digits.iter().fold(Some(Default::default()), |acc, b|
+ acc
+ .and_then(|x| x.checked_mul(32))
+ .and_then(|x| x.checked_add((Into::<u8>::into(*b)).into()))
+ )
+ }
+} }
+define_parse_int_be!(parse_u16_be, u16);
+define_parse_int_be!(parse_u64_be, u64);
fn parse_tagged_parts(data: &[u5]) -> Result<Vec<RawTaggedField>, Bolt11ParseError> {
let mut parts = Vec::<RawTaggedField>::new();
// Ignore tag at data[0], it will be handled in the TaggedField parsers and
// parse the length to find the end of the tagged field's data
- let len = parse_int_be(&data[1..3], 32).expect("can't overflow");
+ let len = parse_u16_be(&data[1..3]).expect("can't overflow") as usize;
let last_element = 3 + len;
if data.len() < last_element {
type Err = Bolt11ParseError;
fn from_base32(field_data: &[u5]) -> Result<ExpiryTime, Bolt11ParseError> {
- match parse_int_be::<u64, u5>(field_data, 32)
+ match parse_u64_be(field_data)
.map(ExpiryTime::from_seconds)
{
Some(t) => Ok(t),
type Err = Bolt11ParseError;
fn from_base32(field_data: &[u5]) -> Result<MinFinalCltvExpiryDelta, Bolt11ParseError> {
- let expiry = parse_int_be::<u64, u5>(field_data, 32);
+ let expiry = parse_u64_be(field_data);
if let Some(expiry) = expiry {
Ok(MinFinalCltvExpiryDelta(expiry))
} else {
let hop = RouteHintHop {
src_node_id: PublicKey::from_slice(&hop_bytes[0..33])?,
- short_channel_id: parse_int_be(&channel_id, 256).expect("short chan ID slice too big?"),
+ short_channel_id: u64::from_be_bytes(channel_id),
fees: RoutingFees {
- base_msat: parse_int_be(&hop_bytes[41..45], 256).expect("slice too big?"),
- proportional_millionths: parse_int_be(&hop_bytes[45..49], 256).expect("slice too big?"),
+ base_msat: u32::from_be_bytes(hop_bytes[41..45].try_into().expect("slice too big?")),
+ proportional_millionths: u32::from_be_bytes(hop_bytes[45..49].try_into().expect("slice too big?")),
},
- cltv_expiry_delta: parse_int_be(&hop_bytes[49..51], 256).expect("slice too big?"),
+ cltv_expiry_delta: u16::from_be_bytes(hop_bytes[49..51].try_into().expect("slice too big?")),
htlc_minimum_msat: None,
htlc_maximum_msat: None,
};
#[test]
fn test_parse_int_from_bytes_be() {
- use crate::de::parse_int_be;
-
- assert_eq!(parse_int_be::<u32, u8>(&[1, 2, 3, 4], 256), Some(16909060));
- assert_eq!(parse_int_be::<u32, u8>(&[1, 3], 32), Some(35));
- assert_eq!(parse_int_be::<u32, u8>(&[255, 255, 255, 255], 256), Some(4294967295));
- assert_eq!(parse_int_be::<u32, u8>(&[1, 0, 0, 0, 0], 256), None);
+ use crate::de::parse_u16_be;
+
+ assert_eq!(parse_u16_be(&[
+ u5::try_from_u8(1).unwrap(), u5::try_from_u8(2).unwrap(),
+ u5::try_from_u8(3).unwrap(), u5::try_from_u8(4).unwrap()]
+ ), Some(34916));
+ assert_eq!(parse_u16_be(&[
+ u5::try_from_u8(2).unwrap(), u5::try_from_u8(0).unwrap(),
+ u5::try_from_u8(0).unwrap(), u5::try_from_u8(0).unwrap()]
+ ), None);
}
#[test]
use lightning::routing::router::{RouteHint, RouteHintHop};
use crate::PrivateRoute;
use bech32::FromBase32;
- use crate::de::parse_int_be;
let input = from_bech32(
"q20q82gphp2nflc7jtzrcazrra7wwgzxqc8u7754cdlpfrmccae92qgzqvzq2ps8pqqqqqqpqqqqq9qqqvpeuqa\
0x7e, 0x14, 0x8f, 0x78, 0xc7, 0x72, 0x55
][..]
).unwrap(),
- short_channel_id: parse_int_be(&[0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08], 256).expect("short chan ID slice too big?"),
+ short_channel_id: 0x0102030405060708,
fees: RoutingFees {
base_msat: 1,
proportional_millionths: 20,
0x7e, 0x14, 0x8f, 0x78, 0xc7, 0x72, 0x55
][..]
).unwrap(),
- short_channel_id: parse_int_be(&[0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a], 256).expect("short chan ID slice too big?"),
+ short_channel_id: 0x030405060708090a,
fees: RoutingFees {
base_msat: 2,
proportional_millionths: 30,
extern crate bech32;
#[macro_use] extern crate lightning;
-extern crate num_traits;
extern crate secp256k1;
extern crate alloc;
#[cfg(any(test, feature = "std"))]
let route_1 = RouteHint(vec![
RouteHintHop {
src_node_id: public_key,
- short_channel_id: de::parse_int_be(&[123; 8], 256).expect("short chan ID slice too big?"),
+ short_channel_id: u64::from_be_bytes([123; 8]),
fees: RoutingFees {
base_msat: 2,
proportional_millionths: 1,
},
RouteHintHop {
src_node_id: public_key,
- short_channel_id: de::parse_int_be(&[42; 8], 256).expect("short chan ID slice too big?"),
+ short_channel_id: u64::from_be_bytes([42; 8]),
fees: RoutingFees {
base_msat: 3,
proportional_millionths: 2,
},
RouteHintHop {
src_node_id: public_key,
- short_channel_id: de::parse_int_be(&[1; 8], 256).expect("short chan ID slice too big?"),
+ short_channel_id: u64::from_be_bytes([1; 8]),
fees: RoutingFees {
base_msat: 5,
proportional_millionths: 4,
use lightning::chain::{Confirm, WatchedOutput};
+use lightning::chain::channelmonitor::ANTI_REORG_DELAY;
use bitcoin::{Txid, BlockHash, Transaction, OutPoint};
use bitcoin::block::Header;
// Outputs that were previously processed, but must not be forgotten yet as
// we still need to monitor any spends on-chain.
pub watched_outputs: HashMap<OutPoint, WatchedOutput>,
+ // Outputs for which we previously saw a spend on-chain but which we keep around until the
+ // spends reach sufficient depth.
+ pub outputs_spends_pending_threshold_conf: Vec<(Txid, u32, OutPoint, WatchedOutput)>,
// The tip hash observed during our last sync.
pub last_sync_hash: Option<BlockHash>,
// Indicates whether we need to resync, e.g., after encountering an error.
Self {
watched_transactions: HashSet::new(),
watched_outputs: HashMap::new(),
+ outputs_spends_pending_threshold_conf: Vec::new(),
last_sync_hash: None,
pending_sync: false,
}
}
self.watched_transactions.insert(txid);
+
+ // If a previously-confirmed output spend is unconfirmed, re-add the watched output to
+ // the tracking map.
+ self.outputs_spends_pending_threshold_conf.retain(|(conf_txid, _, prev_outpoint, output)| {
+ if txid == *conf_txid {
+ self.watched_outputs.insert(*prev_outpoint, output.clone());
+ false
+ } else {
+ true
+ }
+ })
}
}
self.watched_transactions.remove(&ctx.tx.txid());
for input in &ctx.tx.input {
- self.watched_outputs.remove(&input.previous_output);
+ if let Some(output) = self.watched_outputs.remove(&input.previous_output) {
+ self.outputs_spends_pending_threshold_conf.push((ctx.tx.txid(), ctx.block_height, input.previous_output, output));
+ }
}
}
}
+
+ pub fn prune_output_spends(&mut self, cur_height: u32) {
+ self.outputs_spends_pending_threshold_conf.retain(|(_, conf_height, _, _)| {
+ cur_height < conf_height + ANTI_REORG_DELAY - 1
+ });
+ }
}
#[derive(Debug)]
pub(crate) struct ConfirmedTx {
pub tx: Transaction,
+ pub txid: Txid,
pub block_header: Header,
pub block_height: u32,
pub pos: usize,
for c in &confirmables {
c.best_block_updated(&tip_header, tip_height);
}
+
+ // Prune any sufficiently confirmed output spends
+ sync_state.prune_output_spends(tip_height);
}
match self.get_confirmed_transactions(&sync_state) {
// First, check the confirmation status of registered transactions as well as the
// status of dependent transactions of registered outputs.
- let mut confirmed_txs = Vec::new();
+ let mut confirmed_txs: Vec<ConfirmedTx> = Vec::new();
let mut watched_script_pubkeys = Vec::with_capacity(
sync_state.watched_transactions.len() + sync_state.watched_outputs.len());
let mut watched_txs = Vec::with_capacity(sync_state.watched_transactions.len());
for (i, script_history) in tx_results.iter().enumerate() {
let (txid, tx) = &watched_txs[i];
+ if confirmed_txs.iter().any(|ctx| ctx.txid == **txid) {
+ continue;
+ }
let mut filtered_history = script_history.iter().filter(|h| h.tx_hash == **txid);
if let Some(history) = filtered_history.next()
{
}
let txid = possible_output_spend.tx_hash;
+ if confirmed_txs.iter().any(|ctx| ctx.txid == txid) {
+ continue;
+ }
+
match self.client.transaction_get(&txid) {
Ok(tx) => {
let mut is_spend = false;
}
let confirmed_tx = ConfirmedTx {
tx: tx.clone(),
+ txid,
block_header, block_height: prob_conf_height,
pos,
};
}
}
- match maybe_await!(self.sync_best_block_updated(&confirmables, &tip_hash)) {
+ match maybe_await!(self.sync_best_block_updated(&confirmables, &mut sync_state, &tip_hash)) {
Ok(()) => {}
Err(InternalError::Inconsistency) => {
// Immediately restart syncing when we encounter any inconsistencies.
#[maybe_async]
fn sync_best_block_updated(
- &self, confirmables: &Vec<&(dyn Confirm + Sync + Send)>, tip_hash: &BlockHash,
+ &self, confirmables: &Vec<&(dyn Confirm + Sync + Send)>, sync_state: &mut SyncState, tip_hash: &BlockHash,
) -> Result<(), InternalError> {
// Inform the interface of the new block.
for c in confirmables {
c.best_block_updated(&tip_header, tip_height);
}
+
+ // Prune any sufficiently confirmed output spends
+ sync_state.prune_output_spends(tip_height);
}
} else {
return Err(InternalError::Inconsistency);
// First, check the confirmation status of registered transactions as well as the
// status of dependent transactions of registered outputs.
- let mut confirmed_txs = Vec::new();
+ let mut confirmed_txs: Vec<ConfirmedTx> = Vec::new();
for txid in &sync_state.watched_transactions {
- if let Some(confirmed_tx) = maybe_await!(self.get_confirmed_tx(&txid, None, None))? {
+ if confirmed_txs.iter().any(|ctx| ctx.txid == *txid) {
+ continue;
+ }
+ if let Some(confirmed_tx) = maybe_await!(self.get_confirmed_tx(*txid, None, None))? {
confirmed_txs.push(confirmed_tx);
}
}
{
if let Some(spending_txid) = output_status.txid {
if let Some(spending_tx_status) = output_status.status {
+ if confirmed_txs.iter().any(|ctx| ctx.txid == spending_txid) {
+ if spending_tx_status.confirmed {
+ // Skip inserting duplicate ConfirmedTx entry
+ continue;
+ } else {
+ log_trace!(self.logger, "Inconsistency: Detected previously-confirmed Tx {} as unconfirmed", spending_txid);
+ return Err(InternalError::Inconsistency);
+ }
+ }
+
if let Some(confirmed_tx) = maybe_await!(self
.get_confirmed_tx(
- &spending_txid,
+ spending_txid,
spending_tx_status.block_hash,
spending_tx_status.block_height,
))?
#[maybe_async]
fn get_confirmed_tx(
- &self, txid: &Txid, expected_block_hash: Option<BlockHash>, known_block_height: Option<u32>,
+ &self, txid: Txid, expected_block_hash: Option<BlockHash>, known_block_height: Option<u32>,
) -> Result<Option<ConfirmedTx>, InternalError> {
if let Some(merkle_block) = maybe_await!(self.client.get_merkle_block(&txid))? {
let block_header = merkle_block.header;
let mut matches = Vec::new();
let mut indexes = Vec::new();
let _ = merkle_block.txn.extract_matches(&mut matches, &mut indexes);
- if indexes.len() != 1 || matches.len() != 1 || matches[0] != *txid {
+ if indexes.len() != 1 || matches.len() != 1 || matches[0] != txid {
log_error!(self.logger, "Retrieved Merkle block for txid {} doesn't match expectations. This should not happen. Please verify server integrity.", txid);
return Err(InternalError::Failed);
}
// unwrap() safety: len() > 0 is checked above
let pos = *indexes.first().unwrap() as usize;
if let Some(tx) = maybe_await!(self.client.get_tx(&txid))? {
+ if tx.txid() != txid {
+ log_error!(self.logger, "Retrieved transaction for txid {} doesn't match expectations. This should not happen. Please verify server integrity.", txid);
+ return Err(InternalError::Failed);
+ }
+
if let Some(block_height) = known_block_height {
// We can take a shortcut here if a previous call already gave us the height.
- return Ok(Some(ConfirmedTx { tx, block_header, pos, block_height }));
+ return Ok(Some(ConfirmedTx { tx, txid, block_header, pos, block_height }));
}
let block_status = maybe_await!(self.client.get_block_status(&block_hash))?;
if let Some(block_height) = block_status.height {
- return Ok(Some(ConfirmedTx { tx, block_header, pos, block_height }));
+ return Ok(Some(ConfirmedTx { tx, txid, block_header, pos, block_height }));
} else {
// If any previously-confirmed block suddenly is no longer confirmed, we found
// an inconsistency and should start over.
use crate::ln::msgs::DecodeError;
use crate::offers::invoice::BlindedPayInfo;
use crate::prelude::*;
-use crate::util::ser::{Readable, Writeable, Writer};
+use crate::util::ser::{HighZeroBytesDroppedBigSize, Readable, Writeable, Writer};
use core::convert::TryFrom;
impl Writeable for ForwardTlvs {
fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ let features_opt =
+ if self.features == BlindedHopFeatures::empty() { None }
+ else { Some(&self.features) };
encode_tlv_stream!(w, {
(2, self.short_channel_id, required),
(10, self.payment_relay, required),
(12, self.payment_constraints, required),
- (14, self.features, required)
+ (14, features_opt, option)
});
Ok(())
}
short_channel_id,
payment_relay: payment_relay.ok_or(DecodeError::InvalidValue)?,
payment_constraints: payment_constraints.0.unwrap(),
- features: features.ok_or(DecodeError::InvalidValue)?,
+ features: features.unwrap_or_else(BlindedHopFeatures::empty),
}))
} else {
if payment_relay.is_some() || features.is_some() { return Err(DecodeError::InvalidValue) }
})
}
-impl_writeable_msg!(PaymentRelay, {
- cltv_expiry_delta,
- fee_proportional_millionths,
- fee_base_msat
-}, {});
+impl Writeable for PaymentRelay {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.cltv_expiry_delta.write(w)?;
+ self.fee_proportional_millionths.write(w)?;
+ HighZeroBytesDroppedBigSize(self.fee_base_msat).write(w)
+ }
+}
+impl Readable for PaymentRelay {
+ fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let cltv_expiry_delta: u16 = Readable::read(r)?;
+ let fee_proportional_millionths: u32 = Readable::read(r)?;
+ let fee_base_msat: HighZeroBytesDroppedBigSize<u32> = Readable::read(r)?;
+ Ok(Self { cltv_expiry_delta, fee_proportional_millionths, fee_base_msat: fee_base_msat.0 })
+ }
+}
-impl_writeable_msg!(PaymentConstraints, {
- max_cltv_expiry,
- htlc_minimum_msat
-}, {});
+impl Writeable for PaymentConstraints {
+ fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+ self.max_cltv_expiry.write(w)?;
+ HighZeroBytesDroppedBigSize(self.htlc_minimum_msat).write(w)
+ }
+}
+impl Readable for PaymentConstraints {
+ fn read<R: io::Read>(r: &mut R) -> Result<Self, DecodeError> {
+ let max_cltv_expiry: u32 = Readable::read(r)?;
+ let htlc_minimum_msat: HighZeroBytesDroppedBigSize<u64> = Readable::read(r)?;
+ Ok(Self { max_cltv_expiry, htlc_minimum_msat: htlc_minimum_msat.0 })
+ }
+}
#[cfg(test)]
mod tests {
use crate::util::logger::{Logger, Record};
use crate::util::ser::{Readable, ReadableArgs, RequiredWrapper, MaybeReadable, UpgradableRequired, Writer, Writeable, U48};
use crate::util::byte_utils;
-use crate::events::{Event, EventHandler};
+use crate::events::{ClosureReason, Event, EventHandler};
use crate::events::bump_transaction::{AnchorDescriptor, BumpTransactionEvent};
use crate::prelude::*;
/// A monitor event containing an HTLCUpdate.
HTLCEvent(HTLCUpdate),
+ /// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
+ /// channel. Holds information about the channel and why it was closed.
+ HolderForceClosedWithInfo {
+ /// The reason the channel was closed.
+ reason: ClosureReason,
+ /// The funding outpoint of the channel.
+ outpoint: OutPoint,
+ /// The channel ID of the channel.
+ channel_id: ChannelId,
+ },
+
/// Indicates we broadcasted the channel's latest commitment transaction and thus closed the
/// channel.
HolderForceClosed(OutPoint),
(2, monitor_update_id, required),
(4, channel_id, required),
},
+ (5, HolderForceClosedWithInfo) => {
+ (0, reason, upgradable_required),
+ (2, outpoint, required),
+ (4, channel_id, required),
+ },
;
(2, HTLCEvent),
(4, HolderForceClosed),
writer.write_all(&(self.pending_monitor_events.iter().filter(|ev| match ev {
MonitorEvent::HTLCEvent(_) => true,
MonitorEvent::HolderForceClosed(_) => true,
+ MonitorEvent::HolderForceClosedWithInfo { .. } => true,
_ => false,
}).count() as u64).to_be_bytes())?;
for event in self.pending_monitor_events.iter() {
upd.write(writer)?;
},
MonitorEvent::HolderForceClosed(_) => 1u8.write(writer)?,
+ // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. To keep
+ // backwards compatibility, we write a `HolderForceClosed` event along with the
+ // `HolderForceClosedWithInfo` event. This is deduplicated in the reader.
+ MonitorEvent::HolderForceClosedWithInfo { .. } => 1u8.write(writer)?,
_ => {}, // Covered in the TLV writes below
}
}
self.lockdown_from_offchain.write(writer)?;
self.holder_tx_signed.write(writer)?;
+ // If we have a `HolderForceClosedWithInfo` event, we need to write the `HolderForceClosed` for backwards compatibility.
+ let pending_monitor_events = match self.pending_monitor_events.iter().find(|ev| match ev {
+ MonitorEvent::HolderForceClosedWithInfo { .. } => true,
+ _ => false,
+ }) {
+ Some(MonitorEvent::HolderForceClosedWithInfo { outpoint, .. }) => {
+ let mut pending_monitor_events = self.pending_monitor_events.clone();
+ pending_monitor_events.push(MonitorEvent::HolderForceClosed(*outpoint));
+ pending_monitor_events
+ }
+ _ => self.pending_monitor_events.clone(),
+ };
+
write_tlv_fields!(writer, {
(1, self.funding_spend_confirmed, option),
(3, self.htlcs_resolved_on_chain, required_vec),
- (5, self.pending_monitor_events, required_vec),
+ (5, pending_monitor_events, required_vec),
(7, self.funding_spend_seen, required),
(9, self.counterparty_node_id, option),
(11, self.confirmed_commitment_tx_counterparty_output, option),
}
}
- fn generate_claimable_outpoints_and_watch_outputs(&mut self) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
+ fn generate_claimable_outpoints_and_watch_outputs(&mut self, reason: ClosureReason) -> (Vec<PackageTemplate>, Vec<TransactionOutputs>) {
let funding_outp = HolderFundingOutput::build(
self.funding_redeemscript.clone(),
self.channel_value_satoshis,
self.best_block.height, self.best_block.height
);
let mut claimable_outpoints = vec![commitment_package];
- self.pending_monitor_events.push(MonitorEvent::HolderForceClosed(self.funding_info.0));
+ let event = MonitorEvent::HolderForceClosedWithInfo {
+ reason,
+ outpoint: self.funding_info.0,
+ channel_id: self.channel_id,
+ };
+ self.pending_monitor_events.push(event);
+
// Although we aren't signing the transaction directly here, the transaction will be signed
// in the claim that is queued to OnchainTxHandler. We set holder_tx_signed here to reject
// new channel updates.
F::Target: FeeEstimator,
L::Target: Logger,
{
- let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs();
+ let (claimable_outpoints, _) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HolderForceClosed);
self.onchain_tx_handler.update_claims_view_from_requests(
claimable_outpoints, self.best_block.height, self.best_block.height, broadcaster,
fee_estimator, logger
let should_broadcast = self.should_broadcast_holder_commitment_txn(logger);
if should_broadcast {
- let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs();
+ let (mut new_outpoints, mut new_outputs) = self.generate_claimable_outpoints_and_watch_outputs(ClosureReason::HTLCsTimedOut);
claimable_outpoints.append(&mut new_outpoints);
watch_outputs.append(&mut new_outputs);
}
(19, channel_id, option),
});
+ // `HolderForceClosedWithInfo` replaced `HolderForceClosed` in v0.0.122. If we have both
+ // events, we can remove the `HolderForceClosed` event and just keep the `HolderForceClosedWithInfo`.
+ if let Some(ref mut pending_monitor_events) = pending_monitor_events {
+ if pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosed(_))) &&
+ pending_monitor_events.iter().any(|e| matches!(e, MonitorEvent::HolderForceClosedWithInfo { .. }))
+ {
+ pending_monitor_events.retain(|e| !matches!(e, MonitorEvent::HolderForceClosed(_)));
+ }
+ }
+
// Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the
// wrong `counterparty_payment_script` was being tracked. Fix it now on deserialization to
// give them a chance to recognize the spendable output.
use bitcoin::secp256k1::{PublicKey, Secp256k1};
use bitcoin::secp256k1::ecdsa::Signature;
-const EMPTY_SCRIPT_SIG_WEIGHT: u64 = 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64;
+pub(crate) const EMPTY_SCRIPT_SIG_WEIGHT: u64 = 1 /* empty script_sig */ * WITNESS_SCALE_FACTOR as u64;
const BASE_INPUT_SIZE: u64 = 32 /* txid */ + 4 /* vout */ + 4 /* sequence */;
-const BASE_INPUT_WEIGHT: u64 = BASE_INPUT_SIZE * WITNESS_SCALE_FACTOR as u64;
+pub(crate) const BASE_INPUT_WEIGHT: u64 = BASE_INPUT_SIZE * WITNESS_SCALE_FACTOR as u64;
/// A descriptor used to sign for a commitment transaction's anchor output.
#[derive(Clone, Debug, PartialEq, Eq)]
/// Another channel in the same funding batch closed before the funding transaction
/// was ready to be broadcast.
FundingBatchClosure,
+ /// One of our HTLCs timed out in a channel, causing us to force close the channel.
+ HTLCsTimedOut,
}
impl core::fmt::Display for ClosureReason {
ClosureReason::CounterpartyForceClosed { peer_msg } => {
f.write_fmt(format_args!("counterparty force-closed with message: {}", peer_msg))
},
- ClosureReason::HolderForceClosed => f.write_str("user manually force-closed the channel"),
+ ClosureReason::HolderForceClosed => f.write_str("user force-closed the channel"),
ClosureReason::LegacyCooperativeClosure => f.write_str("the channel was cooperatively closed"),
ClosureReason::CounterpartyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by our peer"),
ClosureReason::LocallyInitiatedCooperativeClosure => f.write_str("the channel was cooperatively closed by us"),
ClosureReason::OutdatedChannelManager => f.write_str("the ChannelManager read from disk was stale compared to ChannelMonitor(s)"),
ClosureReason::CounterpartyCoopClosedUnfundedChannel => f.write_str("the peer requested the unfunded channel be closed"),
ClosureReason::FundingBatchClosure => f.write_str("another channel in the same funding batch closed"),
+ ClosureReason::HTLCsTimedOut => f.write_str("htlcs on the channel timed out"),
}
}
}
(15, FundingBatchClosure) => {},
(17, CounterpartyInitiatedCooperativeClosure) => {},
(19, LocallyInitiatedCooperativeClosure) => {},
+ (21, HTLCsTimedOut) => {},
);
/// Intended destination of a failed HTLC as indicated in [`Event::HTLCHandlingFailed`].
/// This event is generated when a payment has been successfully forwarded through us and a
/// forwarding fee earned.
PaymentForwarded {
- /// The incoming channel between the previous node and us. This is only `None` for events
- /// generated or serialized by versions prior to 0.0.107.
+ /// The channel id of the incoming channel between the previous node and us.
+ ///
+ /// This is only `None` for events generated or serialized by versions prior to 0.0.107.
prev_channel_id: Option<ChannelId>,
- /// The outgoing channel between the next node and us. This is only `None` for events
- /// generated or serialized by versions prior to 0.0.107.
+ /// The channel id of the outgoing channel between the next node and us.
+ ///
+ /// This is only `None` for events generated or serialized by versions prior to 0.0.107.
next_channel_id: Option<ChannelId>,
+ /// The `user_channel_id` of the incoming channel between the previous node and us.
+ ///
+ /// This is only `None` for events generated or serialized by versions prior to 0.0.122.
+ prev_user_channel_id: Option<u128>,
+ /// The `user_channel_id` of the outgoing channel between the next node and us.
+ ///
+ /// This will be `None` if the payment was settled via an on-chain transaction. See the
+ /// caveat described for the `total_fee_earned_msat` field. Moreover it will be `None` for
+ /// events generated or serialized by versions prior to 0.0.122.
+ next_user_channel_id: Option<u128>,
/// The total fee, in milli-satoshis, which was earned as a result of the payment.
///
/// Note that if we force-closed the channel over which we forwarded an HTLC while the HTLC
});
}
&Event::PaymentForwarded {
- total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx,
- next_channel_id, outbound_amount_forwarded_msat, skimmed_fee_msat,
+ prev_channel_id, next_channel_id, prev_user_channel_id, next_user_channel_id,
+ total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx,
+ outbound_amount_forwarded_msat,
} => {
7u8.write(writer)?;
write_tlv_fields!(writer, {
(3, next_channel_id, option),
(5, outbound_amount_forwarded_msat, option),
(7, skimmed_fee_msat, option),
+ (9, prev_user_channel_id, option),
+ (11, next_user_channel_id, option),
});
},
&Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason,
},
7u8 => {
let f = || {
- let mut total_fee_earned_msat = None;
let mut prev_channel_id = None;
- let mut claim_from_onchain_tx = false;
let mut next_channel_id = None;
- let mut outbound_amount_forwarded_msat = None;
+ let mut prev_user_channel_id = None;
+ let mut next_user_channel_id = None;
+ let mut total_fee_earned_msat = None;
let mut skimmed_fee_msat = None;
+ let mut claim_from_onchain_tx = false;
+ let mut outbound_amount_forwarded_msat = None;
read_tlv_fields!(reader, {
(0, total_fee_earned_msat, option),
(1, prev_channel_id, option),
(3, next_channel_id, option),
(5, outbound_amount_forwarded_msat, option),
(7, skimmed_fee_msat, option),
+ (9, prev_user_channel_id, option),
+ (11, next_user_channel_id, option),
});
Ok(Some(Event::PaymentForwarded {
- total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
- outbound_amount_forwarded_msat, skimmed_fee_msat,
+ prev_channel_id, next_channel_id, prev_user_channel_id,
+ next_user_channel_id, total_fee_earned_msat, skimmed_fee_msat,
+ claim_from_onchain_tx, outbound_amount_forwarded_msat,
}))
};
f()
use crate::ln::msgs::ChannelMessageHandler;
use crate::ln::onion_utils;
use crate::ln::onion_utils::INVALID_ONION_BLINDING;
-use crate::ln::outbound_payment::Retry;
+use crate::ln::outbound_payment::{Retry, IDEMPOTENCY_TIMEOUT_TICKS};
use crate::offers::invoice::BlindedPayInfo;
use crate::prelude::*;
use crate::routing::router::{Payee, PaymentParameters, RouteParameters};
use crate::util::test_utils;
fn blinded_payment_path(
- payment_secret: PaymentSecret, node_ids: Vec<PublicKey>,
- channel_upds: &[&msgs::UnsignedChannelUpdate], keys_manager: &test_utils::TestKeysInterface
+ payment_secret: PaymentSecret, intro_node_min_htlc: u64, intro_node_max_htlc: u64,
+ node_ids: Vec<PublicKey>, channel_upds: &[&msgs::UnsignedChannelUpdate],
+ keys_manager: &test_utils::TestKeysInterface
) -> (BlindedPayInfo, BlindedPath) {
let mut intermediate_nodes = Vec::new();
- for (node_id, chan_upd) in node_ids.iter().zip(channel_upds) {
+ let mut intro_node_min_htlc_opt = Some(intro_node_min_htlc);
+ let mut intro_node_max_htlc_opt = Some(intro_node_max_htlc);
+ for (idx, (node_id, chan_upd)) in node_ids.iter().zip(channel_upds).enumerate() {
intermediate_nodes.push(ForwardNode {
node_id: *node_id,
tlvs: ForwardTlvs {
},
payment_constraints: PaymentConstraints {
max_cltv_expiry: u32::max_value(),
- htlc_minimum_msat: chan_upd.htlc_minimum_msat,
+ htlc_minimum_msat: intro_node_min_htlc_opt.take()
+ .unwrap_or_else(|| channel_upds[idx - 1].htlc_minimum_msat),
},
features: BlindedHopFeatures::empty(),
},
- htlc_maximum_msat: chan_upd.htlc_maximum_msat,
+ htlc_maximum_msat: intro_node_max_htlc_opt.take()
+ .unwrap_or_else(|| channel_upds[idx - 1].htlc_maximum_msat),
});
}
let payee_tlvs = ReceiveTlvs {
payment_secret,
payment_constraints: PaymentConstraints {
max_cltv_expiry: u32::max_value(),
- htlc_minimum_msat: channel_upds.last().unwrap().htlc_minimum_msat,
+ htlc_minimum_msat:
+ intro_node_min_htlc_opt.unwrap_or_else(|| channel_upds.last().unwrap().htlc_minimum_msat),
},
};
let mut secp_ctx = Secp256k1::new();
BlindedPath::new_for_payment(
&intermediate_nodes[..], *node_ids.last().unwrap(), payee_tlvs,
- channel_upds.last().unwrap().htlc_maximum_msat, TEST_FINAL_CLTV as u16, keys_manager, &secp_ctx
+ intro_node_max_htlc_opt.unwrap_or_else(|| channel_upds.last().unwrap().htlc_maximum_msat),
+ TEST_FINAL_CLTV as u16, keys_manager, &secp_ctx
).unwrap()
}
pub fn get_blinded_route_parameters(
- amt_msat: u64, payment_secret: PaymentSecret, node_ids: Vec<PublicKey>,
- channel_upds: &[&msgs::UnsignedChannelUpdate], keys_manager: &test_utils::TestKeysInterface
+ amt_msat: u64, payment_secret: PaymentSecret, intro_node_min_htlc: u64, intro_node_max_htlc: u64,
+ node_ids: Vec<PublicKey>, channel_upds: &[&msgs::UnsignedChannelUpdate],
+ keys_manager: &test_utils::TestKeysInterface
) -> RouteParameters {
RouteParameters::from_payment_params_and_value(
PaymentParameters::blinded(vec![
- blinded_payment_path(payment_secret, node_ids, channel_upds, keys_manager)
+ blinded_payment_path(
+ payment_secret, intro_node_min_htlc, intro_node_max_htlc, node_ids, channel_upds,
+ keys_manager
+ )
]), amt_msat
)
}
claim_payment_along_route(&nodes[0], expected_route, false, payment_preimage);
}
+#[test]
+fn mpp_to_three_hop_blinded_paths() {
+ let chanmon_cfgs = create_chanmon_cfgs(6);
+ let node_cfgs = create_node_cfgs(6, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(6, &node_cfgs, &[None, None, None, None, None, None]);
+ let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
+
+ // Create this network topology so node 0 MPP's over 2 3-hop blinded paths:
+ // n1 -- n3
+ // / \
+ // n0 n5
+ // \ /
+ // n2 -- n4
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 0, 2);
+ let chan_upd_1_3 = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents;
+ let chan_upd_2_4 = create_announced_chan_between_nodes(&nodes, 2, 4).0.contents;
+ let chan_upd_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5).0.contents;
+ let chan_upd_4_5 = create_announced_chan_between_nodes(&nodes, 4, 5).0.contents;
+
+ let amt_msat = 15_000_000;
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[5], Some(amt_msat), None);
+ let route_params = {
+ let path_1_params = get_blinded_route_parameters(
+ amt_msat, payment_secret, 1, 1_0000_0000, vec![
+ nodes[1].node.get_our_node_id(), nodes[3].node.get_our_node_id(),
+ nodes[5].node.get_our_node_id()
+ ], &[&chan_upd_1_3, &chan_upd_3_5], &chanmon_cfgs[5].keys_manager
+ );
+ let path_2_params = get_blinded_route_parameters(
+ amt_msat, payment_secret, 1, 1_0000_0000, vec![
+ nodes[2].node.get_our_node_id(), nodes[4].node.get_our_node_id(),
+ nodes[5].node.get_our_node_id()
+ ], &[&chan_upd_2_4, &chan_upd_4_5], &chanmon_cfgs[5].keys_manager
+ );
+ let pay_params = PaymentParameters::blinded(
+ vec![
+ path_1_params.payment_params.payee.blinded_route_hints()[0].clone(),
+ path_2_params.payment_params.payee.blinded_route_hints()[0].clone()
+ ]
+ )
+ .with_bolt12_features(channelmanager::provided_bolt12_invoice_features(&UserConfig::default()))
+ .unwrap();
+ RouteParameters::from_payment_params_and_value(pay_params, amt_msat)
+ };
+
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(),
+ PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+ check_added_monitors(&nodes[0], 2);
+
+ let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[3], &nodes[5]], &[&nodes[2], &nodes[4], &nodes[5]]];
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+
+ let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+ pass_along_path(&nodes[0], expected_route[0], amt_msat, payment_hash.clone(),
+ Some(payment_secret), ev.clone(), false, None);
+
+ let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
+ pass_along_path(&nodes[0], expected_route[1], amt_msat, payment_hash.clone(),
+ Some(payment_secret), ev.clone(), true, None);
+ claim_payment_along_route(&nodes[0], expected_route, false, payment_preimage);
+}
+
enum ForwardCheckFail {
// Fail a check on the inbound onion payload. In this case, we underflow when calculating the
// outgoing cltv_expiry.
#[test]
fn forward_checks_failure() {
- do_forward_checks_failure(ForwardCheckFail::InboundOnionCheck);
- do_forward_checks_failure(ForwardCheckFail::ForwardPayloadEncodedAsReceive);
- do_forward_checks_failure(ForwardCheckFail::OutboundChannelCheck);
+ do_forward_checks_failure(ForwardCheckFail::InboundOnionCheck, true);
+ do_forward_checks_failure(ForwardCheckFail::InboundOnionCheck, false);
+ do_forward_checks_failure(ForwardCheckFail::ForwardPayloadEncodedAsReceive, true);
+ do_forward_checks_failure(ForwardCheckFail::ForwardPayloadEncodedAsReceive, false);
+ do_forward_checks_failure(ForwardCheckFail::OutboundChannelCheck, true);
+ do_forward_checks_failure(ForwardCheckFail::OutboundChannelCheck, false);
}
-fn do_forward_checks_failure(check: ForwardCheckFail) {
+fn do_forward_checks_failure(check: ForwardCheckFail, intro_fails: bool) {
// Ensure we'll fail backwards properly if a forwarding check fails on initial update_add
// receipt.
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
- let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
// We need the session priv to construct a bogus onion packet later.
*nodes[0].keys_manager.override_random_bytes.lock().unwrap() = Some([3; 32]);
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+ let chan_upd_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents;
let amt_msat = 5000;
- let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
- let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
- nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
- &chanmon_cfgs[2].keys_manager);
+ let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
+ nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(),
+ &[&chan_upd_1_2, &chan_upd_2_3], &chanmon_cfgs[3].keys_manager);
let route = get_route(&nodes[0], &route_params).unwrap();
node_cfgs[0].router.expect_find_route(route_params.clone(), Ok(route.clone()));
nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
check_added_monitors(&nodes[0], 1);
- let mut events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 1);
- let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
- let mut payment_event = SendEvent::from_event(ev);
+ macro_rules! cause_error {
+ ($src_node_idx: expr, $target_node_idx: expr, $update_add: expr) => {
+ match check {
+ ForwardCheckFail::InboundOnionCheck => {
+ $update_add.cltv_expiry = 10; // causes outbound CLTV expiry to underflow
+ },
+ ForwardCheckFail::ForwardPayloadEncodedAsReceive => {
+ let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
+ let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
+ let cur_height = nodes[0].best_block_info().1;
+ let (mut onion_payloads, ..) = onion_utils::build_onion_payloads(
+ &route.paths[0], amt_msat, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
+ // Remove the receive payload so the blinded forward payload is encoded as a final payload
+ // (i.e. next_hop_hmac == [0; 32])
+ onion_payloads.pop();
+ if $target_node_idx + 1 < nodes.len() {
+ onion_payloads.pop();
+ }
+ $update_add.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
+ },
+ ForwardCheckFail::OutboundChannelCheck => {
+ // The intro node will see that the next-hop peer is disconnected and fail the HTLC backwards.
+ nodes[$src_node_idx].node.peer_disconnected(&nodes[$target_node_idx].node.get_our_node_id());
+ }
+ }
+ }
+ }
- let mut update_add = &mut payment_event.msgs[0];
- match check {
- ForwardCheckFail::InboundOnionCheck => {
- update_add.cltv_expiry = 10; // causes outbound CLTV expiry to underflow
- },
- ForwardCheckFail::ForwardPayloadEncodedAsReceive => {
- let session_priv = SecretKey::from_slice(&[3; 32]).unwrap();
- let onion_keys = onion_utils::construct_onion_keys(&Secp256k1::new(), &route.paths[0], &session_priv).unwrap();
- let cur_height = nodes[0].best_block_info().1;
- let (mut onion_payloads, ..) = onion_utils::build_onion_payloads(
- &route.paths[0], amt_msat, RecipientOnionFields::spontaneous_empty(), cur_height, &None).unwrap();
- // Remove the receive payload so the blinded forward payload is encoded as a final payload
- // (i.e. next_hop_hmac == [0; 32])
- onion_payloads.pop();
- update_add.onion_routing_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, [0; 32], &payment_hash).unwrap();
- },
- ForwardCheckFail::OutboundChannelCheck => {
- // The intro node will see that the next-hop peer is disconnected and fail the HTLC backwards.
- nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
- },
+ let mut updates_0_1 = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+ let update_add = &mut updates_0_1.update_add_htlcs[0];
+
+ if intro_fails {
+ cause_error!(1, 2, update_add);
}
- nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
+
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &update_add);
check_added_monitors!(nodes[1], 0);
- do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, true, true);
+ do_commitment_signed_dance(&nodes[1], &nodes[0], &updates_0_1.commitment_signed, true, true);
+
+ if intro_fails {
+ let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
+ expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+ PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+ return
+ }
+
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+
+ let mut updates_1_2 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+ let mut update_add = &mut updates_1_2.update_add_htlcs[0];
+
+ cause_error!(2, 3, update_add);
+
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add);
+ check_added_monitors!(nodes[2], 0);
+ do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true);
+
+ let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ let update_malformed = &mut updates.update_fail_malformed_htlcs[0];
+ assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
+ assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
+
+ // Ensure the intro node will properly blind the error if its downstream node failed to do so.
+ update_malformed.sha256_of_onion = [1; 32];
+ update_malformed.failure_code = INVALID_ONION_BLINDING ^ 1;
+ nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), update_malformed);
+ do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false);
let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
let amt_msat = 5000;
let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
- let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
&chanmon_cfgs[2].keys_manager);
#[test]
fn forward_fail_in_process_pending_htlc_fwds() {
- do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdPeerDisconnected);
- do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdChannelClosed);
+ do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdPeerDisconnected, true);
+ do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdPeerDisconnected, false);
+ do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdChannelClosed, true);
+ do_forward_fail_in_process_pending_htlc_fwds(ProcessPendingHTLCsCheck::FwdChannelClosed, false);
}
-fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck) {
+fn do_forward_fail_in_process_pending_htlc_fwds(check: ProcessPendingHTLCsCheck, intro_fails: bool) {
// Ensure the intro node will error backwards properly if the HTLC fails in
// process_pending_htlc_forwards.
- let chanmon_cfgs = create_chanmon_cfgs(3);
- let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
- let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
- let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
- let (chan_upd_1_2, channel_id) = {
+ let (chan_upd_1_2, chan_id_1_2) = {
let chan = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
(chan.0.contents, chan.2)
};
+ let (chan_upd_2_3, chan_id_2_3) = {
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
+ (chan.0.contents, chan.2)
+ };
let amt_msat = 5000;
let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
- let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
- nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
+ nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2, &chan_upd_2_3],
&chanmon_cfgs[2].keys_manager);
nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
check_added_monitors!(nodes[1], 0);
do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event.commitment_msg, false, false);
- match check {
- ProcessPendingHTLCsCheck::FwdPeerDisconnected => {
- // Disconnect the next-hop peer so when we go to forward in process_pending_htlc_forwards, the
- // intro node will error backwards.
- nodes[1].node.peer_disconnected(&nodes[2].node.get_our_node_id());
- expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1],
- vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id }]);
- },
- ProcessPendingHTLCsCheck::FwdChannelClosed => {
- // Force close the next-hop channel so when we go to forward in process_pending_htlc_forwards,
- // the intro node will error backwards.
- nodes[1].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[2].node.get_our_node_id()).unwrap();
- let events = nodes[1].node.get_and_clear_pending_events();
- match events[0] {
- crate::events::Event::PendingHTLCsForwardable { .. } => {},
- _ => panic!("Unexpected event {:?}", events),
- };
- match events[1] {
- crate::events::Event::ChannelClosed { .. } => {},
- _ => panic!("Unexpected event {:?}", events),
+ macro_rules! cause_error {
+ ($prev_node: expr, $curr_node: expr, $next_node: expr, $failed_chan_id: expr, $failed_scid: expr) => {
+ match check {
+ ProcessPendingHTLCsCheck::FwdPeerDisconnected => {
+ // Disconnect the next-hop peer so when we go to forward in process_pending_htlc_forwards, the
+ // intro node will error backwards.
+ $curr_node.node.peer_disconnected(&$next_node.node.get_our_node_id());
+ expect_pending_htlcs_forwardable!($curr_node);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node,
+ vec![HTLCDestination::NextHopChannel { node_id: Some($next_node.node.get_our_node_id()), channel_id: $failed_chan_id }]);
+ },
+ ProcessPendingHTLCsCheck::FwdChannelClosed => {
+ // Force close the next-hop channel so when we go to forward in process_pending_htlc_forwards,
+ // the intro node will error backwards.
+ $curr_node.node.force_close_broadcasting_latest_txn(&$failed_chan_id, &$next_node.node.get_our_node_id()).unwrap();
+ let events = $curr_node.node.get_and_clear_pending_events();
+ match events[0] {
+ crate::events::Event::PendingHTLCsForwardable { .. } => {},
+ _ => panic!("Unexpected event {:?}", events),
+ };
+ match events[1] {
+ crate::events::Event::ChannelClosed { .. } => {},
+ _ => panic!("Unexpected event {:?}", events),
+ }
+
+ $curr_node.node.process_pending_htlc_forwards();
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!($curr_node,
+ vec![HTLCDestination::UnknownNextHop { requested_forward_scid: $failed_scid }]);
+ check_closed_broadcast(&$curr_node, 1, true);
+ check_added_monitors!($curr_node, 1);
+ $curr_node.node.process_pending_htlc_forwards();
+ },
}
+ }
+ }
- nodes[1].node.process_pending_htlc_forwards();
- expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1],
- vec![HTLCDestination::UnknownNextHop { requested_forward_scid: chan_upd_1_2.short_channel_id }]);
- check_closed_broadcast(&nodes[1], 1, true);
- check_added_monitors!(nodes[1], 1);
- nodes[1].node.process_pending_htlc_forwards();
- },
+ if intro_fails {
+ cause_error!(nodes[0], nodes[1], nodes[2], chan_id_1_2, chan_upd_1_2.short_channel_id);
+ let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ check_added_monitors!(nodes[1], 1);
+ do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
+
+ expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+ PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+ return
}
+ expect_pending_htlcs_forwardable!(nodes[1]);
+ check_added_monitors!(nodes[1], 1);
+
+ let mut updates_1_2 = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+ let mut update_add = &mut updates_1_2.update_add_htlcs[0];
+ nodes[2].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &update_add);
+ check_added_monitors!(nodes[2], 0);
+ do_commitment_signed_dance(&nodes[2], &nodes[1], &updates_1_2.commitment_signed, true, true);
+
+ cause_error!(nodes[1], nodes[2], nodes[3], chan_id_2_3, chan_upd_2_3.short_channel_id);
+ check_added_monitors!(nodes[2], 1);
+
+ let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ let update_malformed = &mut updates.update_fail_malformed_htlcs[0];
+ assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
+ assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
+
+ // Ensure the intro node will properly blind the error if its downstream node failed to do so.
+ update_malformed.sha256_of_onion = [1; 32];
+ update_malformed.failure_code = INVALID_ONION_BLINDING ^ 1;
+ nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), update_malformed);
+ do_commitment_signed_dance(&nodes[1], &nodes[2], &updates.commitment_signed, true, false);
+
let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
- check_added_monitors!(nodes[1], 1);
do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
-
expect_payment_failed_conditions(&nodes[0], payment_hash, false,
PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
}
let intercept_scid = nodes[1].node.get_intercept_scid();
let mut intercept_chan_upd = chan_upd;
intercept_chan_upd.short_channel_id = intercept_scid;
- let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&intercept_chan_upd],
&chanmon_cfgs[2].keys_manager);
let amt_msat = 5000;
let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
- let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
&chanmon_cfgs[2].keys_manager);
let amt_msat = 5000;
let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[4], Some(amt_msat), None);
- let route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
nodes.iter().skip(2).map(|n| n.node.get_our_node_id()).collect(),
&[&chan_upd_2_3, &chan_upd_3_4], &chanmon_cfgs[4].keys_manager);
claim_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]], payment_preimage);
}
+#[test]
+fn three_hop_blinded_path_fail() {
+ // Test that an intermediate blinded forwarding node gets failed back to with
+ // malformed and also fails back themselves with malformed.
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+ let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+ let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+ let chan_upd_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents;
+
+ let amt_msat = 5000;
+ let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
+ nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(),
+ &[&chan_upd_1_2, &chan_upd_2_3], &chanmon_cfgs[3].keys_manager);
+
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+ check_added_monitors(&nodes[0], 1);
+ pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2], &nodes[3]]], amt_msat, payment_hash, payment_secret);
+
+ nodes[3].node.fail_htlc_backwards(&payment_hash);
+ expect_pending_htlcs_forwardable_conditions(
+ nodes[3].node.get_and_clear_pending_events(), &[HTLCDestination::FailedPayment { payment_hash }]
+ );
+ nodes[3].node.process_pending_htlc_forwards();
+ check_added_monitors!(nodes[3], 1);
+
+ let updates_3_2 = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
+ assert_eq!(updates_3_2.update_fail_malformed_htlcs.len(), 1);
+ let update_malformed = &updates_3_2.update_fail_malformed_htlcs[0];
+ assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
+ assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
+ nodes[2].node.handle_update_fail_malformed_htlc(&nodes[3].node.get_our_node_id(), update_malformed);
+ do_commitment_signed_dance(&nodes[2], &nodes[3], &updates_3_2.commitment_signed, true, false);
+
+ let updates_2_1 = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+ assert_eq!(updates_2_1.update_fail_malformed_htlcs.len(), 1);
+ let update_malformed = &updates_2_1.update_fail_malformed_htlcs[0];
+ assert_eq!(update_malformed.sha256_of_onion, [0; 32]);
+ assert_eq!(update_malformed.failure_code, INVALID_ONION_BLINDING);
+ nodes[1].node.handle_update_fail_malformed_htlc(&nodes[2].node.get_our_node_id(), update_malformed);
+ do_commitment_signed_dance(&nodes[1], &nodes[2], &updates_2_1.commitment_signed, true, false);
+
+ let updates_1_0 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ assert_eq!(updates_1_0.update_fail_htlcs.len(), 1);
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates_1_0.update_fail_htlcs[0]);
+ do_commitment_signed_dance(&nodes[0], &nodes[1], &updates_1_0.commitment_signed, false, false);
+ expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+ PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+}
+
#[derive(PartialEq)]
enum ReceiveCheckFail {
// The recipient fails the payment upon `PaymentClaimable`.
Some(TEST_FINAL_CLTV as u16 - 2)
} else { None };
let (_, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), excess_final_cltv_delta_opt);
- let mut route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ let mut route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_1_2],
&chanmon_cfgs[2].keys_manager);
let high_htlc_min_bp = {
let mut high_htlc_minimum_upd = chan_upd_1_2.clone();
high_htlc_minimum_upd.htlc_minimum_msat = amt_msat + 1000;
- let high_htlc_min_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ let high_htlc_min_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&high_htlc_minimum_upd],
&chanmon_cfgs[2].keys_manager);
if let Payee::Blinded { route_hints, .. } = high_htlc_min_params.payment_params.payee {
let route_params = {
let pay_params = PaymentParameters::blinded(
vec![
- blinded_payment_path(payment_secret,
+ blinded_payment_path(payment_secret, 1, 1_0000_0000,
vec![nodes[1].node.get_our_node_id(), nodes[3].node.get_our_node_id()], &[&chan_1_3.0.contents],
&chanmon_cfgs[3].keys_manager
),
- blinded_payment_path(payment_secret,
+ blinded_payment_path(payment_secret, 1, 1_0000_0000,
vec![nodes[2].node.get_our_node_id(), nodes[3].node.get_our_node_id()], &[&chan_2_3.0.contents],
&chanmon_cfgs[3].keys_manager
),
_ => panic!()
}
}
+
+#[test]
+fn min_htlc() {
+ // The min htlc of a blinded path is the max (htlc_min - following_fees) along the path. Make sure
+ // the payment succeeds when we calculate the min htlc this way.
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let mut node_1_cfg = test_default_channel_config();
+ node_1_cfg.channel_handshake_config.our_htlc_minimum_msat = 2000;
+ node_1_cfg.channel_config.forwarding_fee_base_msat = 1000;
+ node_1_cfg.channel_config.forwarding_fee_proportional_millionths = 100_000;
+ let mut node_2_cfg = test_default_channel_config();
+ node_2_cfg.channel_handshake_config.our_htlc_minimum_msat = 5000;
+ node_2_cfg.channel_config.forwarding_fee_base_msat = 200;
+ node_2_cfg.channel_config.forwarding_fee_proportional_millionths = 150_000;
+ let mut node_3_cfg = test_default_channel_config();
+ node_3_cfg.channel_handshake_config.our_htlc_minimum_msat = 2000;
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, Some(node_1_cfg), Some(node_2_cfg), Some(node_3_cfg)]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+ let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+ let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
+
+ let min_htlc_msat = {
+ // The min htlc for this setup is nodes[2]'s htlc_minimum_msat minus the
+ // following fees.
+ let post_base_fee = chan_2_3.1.contents.htlc_minimum_msat - chan_2_3.0.contents.fee_base_msat as u64;
+ let prop_fee = chan_2_3.0.contents.fee_proportional_millionths as u64;
+ (post_base_fee * 1_000_000 + 1_000_000 + prop_fee - 1) / (prop_fee + 1_000_000)
+ };
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(min_htlc_msat), None);
+ let mut route_params = get_blinded_route_parameters(
+ min_htlc_msat, payment_secret, chan_1_2.1.contents.htlc_minimum_msat,
+ chan_1_2.1.contents.htlc_maximum_msat, vec![nodes[1].node.get_our_node_id(),
+ nodes[2].node.get_our_node_id(), nodes[3].node.get_our_node_id()],
+ &[&chan_1_2.0.contents, &chan_2_3.0.contents], &chanmon_cfgs[3].keys_manager);
+ assert_eq!(min_htlc_msat,
+ route_params.payment_params.payee.blinded_route_hints()[0].0.htlc_minimum_msat);
+
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap();
+ check_added_monitors(&nodes[0], 1);
+ pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2], &nodes[3]]], min_htlc_msat, payment_hash, payment_secret);
+ claim_payment(&nodes[0], &[&nodes[1], &nodes[2], &nodes[3]], payment_preimage);
+
+ // Paying 1 less than the min fails.
+ for _ in 0..IDEMPOTENCY_TIMEOUT_TICKS + 1 {
+ nodes[0].node.timer_tick_occurred();
+ }
+ if let Payee::Blinded { ref mut route_hints, .. } = route_params.payment_params.payee {
+ route_hints[0].0.htlc_minimum_msat -= 1;
+ } else { panic!() }
+ route_params.final_value_msat -= 1;
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+ check_added_monitors(&nodes[0], 1);
+
+ let mut payment_event_0_1 = {
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+ SendEvent::from_event(ev)
+ };
+ nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event_0_1.msgs[0]);
+ check_added_monitors!(nodes[1], 0);
+ do_commitment_signed_dance(&nodes[1], &nodes[0], &payment_event_0_1.commitment_msg, true, true);
+ let mut updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
+ do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false);
+ expect_payment_failed_conditions(&nodes[0], payment_hash, false,
+ PaymentFailedConditions::new().expected_htlc_error_data(INVALID_ONION_BLINDING, &[0; 32]));
+}
+
+#[test]
+fn conditionally_round_fwd_amt() {
+ // Previously, the (rng-found) feerates below caught a bug where an intermediate node would
+ // calculate an amt_to_forward that underpaid them by 1 msat, caused by rounding up the outbound
+ // amount on top of an already rounded-up total routing fee. Ensure that we'll conditionally round
+ // down intermediate nodes' outbound amounts based on whether rounding up will result in
+ // undercharging for relay.
+ let chanmon_cfgs = create_chanmon_cfgs(5);
+ let node_cfgs = create_node_cfgs(5, &chanmon_cfgs);
+
+ let mut node_1_cfg = test_default_channel_config();
+ node_1_cfg.channel_config.forwarding_fee_base_msat = 247371;
+ node_1_cfg.channel_config.forwarding_fee_proportional_millionths = 86552;
+
+ let mut node_2_cfg = test_default_channel_config();
+ node_2_cfg.channel_config.forwarding_fee_base_msat = 198921;
+ node_2_cfg.channel_config.forwarding_fee_proportional_millionths = 681759;
+
+ let mut node_3_cfg = test_default_channel_config();
+ node_3_cfg.channel_config.forwarding_fee_base_msat = 132845;
+ node_3_cfg.channel_config.forwarding_fee_proportional_millionths = 552561;
+
+ let node_chanmgrs = create_node_chanmgrs(5, &node_cfgs, &[None, Some(node_1_cfg), Some(node_2_cfg), Some(node_3_cfg), None]);
+ let nodes = create_network(5, &node_cfgs, &node_chanmgrs);
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+ let chan_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0);
+ let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0);
+ let chan_3_4 = create_announced_chan_between_nodes_with_value(&nodes, 3, 4, 1_000_000, 0);
+
+ let amt_msat = 100_000;
+ let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[4], Some(amt_msat), None);
+ let mut route_params = get_blinded_route_parameters(amt_msat, payment_secret,
+ chan_1_2.1.contents.htlc_minimum_msat, chan_1_2.1.contents.htlc_maximum_msat,
+ vec![nodes[1].node.get_our_node_id(), nodes[2].node.get_our_node_id(),
+ nodes[3].node.get_our_node_id(), nodes[4].node.get_our_node_id()],
+ &[&chan_1_2.0.contents, &chan_2_3.0.contents, &chan_3_4.0.contents],
+ &chanmon_cfgs[4].keys_manager);
+ route_params.max_total_routing_fee_msat = None;
+
+ nodes[0].node.send_payment(payment_hash, RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0), route_params, Retry::Attempts(0)).unwrap();
+ check_added_monitors(&nodes[0], 1);
+ pass_along_route(&nodes[0], &[&[&nodes[1], &nodes[2], &nodes[3], &nodes[4]]], amt_msat, payment_hash, payment_secret);
+ nodes[4].node.claim_funds(payment_preimage);
+ let expected_path = &[&nodes[1], &nodes[2], &nodes[3], &nodes[4]];
+ let expected_route = &[&expected_path[..]];
+ let mut args = ClaimAlongRouteArgs::new(&nodes[0], &expected_route[..], payment_preimage)
+ .allow_1_msat_fee_overpay();
+ let expected_fee = pass_claimed_payment_along_route(args);
+ expect_payment_sent(&nodes[0], payment_preimage, Some(Some(expected_fee)), true, true);
+}
+
+#[test]
+fn blinded_keysend() {
+ let mut mpp_keysend_config = test_default_channel_config();
+ mpp_keysend_config.accept_mpp_keysend = true;
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, Some(mpp_keysend_config)]);
+ let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+ create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 0);
+ let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+
+ let amt_msat = 5000;
+ let (keysend_preimage, _, payment_secret) = get_payment_preimage_hash(&nodes[2], None, None);
+ let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1,
+ 1_0000_0000,
+ nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(),
+ &[&chan_upd_1_2], &chanmon_cfgs[2].keys_manager);
+
+ let payment_hash = nodes[0].node.send_spontaneous_payment_with_retry(Some(keysend_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(keysend_preimage.0), route_params, Retry::Attempts(0)).unwrap();
+ check_added_monitors(&nodes[0], 1);
+
+ let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[2]]];
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+
+ let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+ pass_along_path(&nodes[0], expected_route[0], amt_msat, payment_hash, Some(payment_secret), ev.clone(), true, Some(keysend_preimage));
+ claim_payment_along_route(&nodes[0], expected_route, false, keysend_preimage);
+}
+
+#[test]
+fn blinded_mpp_keysend() {
+ let mut mpp_keysend_config = test_default_channel_config();
+ mpp_keysend_config.accept_mpp_keysend = true;
+ let chanmon_cfgs = create_chanmon_cfgs(4);
+ let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, Some(mpp_keysend_config)]);
+ let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ create_announced_chan_between_nodes(&nodes, 0, 2);
+ let chan_1_3 = create_announced_chan_between_nodes(&nodes, 1, 3);
+ let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3);
+
+ let amt_msat = 15_000_000;
+ let (keysend_preimage, _, payment_secret) = get_payment_preimage_hash(&nodes[3], None, None);
+ let route_params = {
+ let pay_params = PaymentParameters::blinded(
+ vec![
+ blinded_payment_path(payment_secret, 1, 1_0000_0000,
+ vec![nodes[1].node.get_our_node_id(), nodes[3].node.get_our_node_id()], &[&chan_1_3.0.contents],
+ &chanmon_cfgs[3].keys_manager
+ ),
+ blinded_payment_path(payment_secret, 1, 1_0000_0000,
+ vec![nodes[2].node.get_our_node_id(), nodes[3].node.get_our_node_id()], &[&chan_2_3.0.contents],
+ &chanmon_cfgs[3].keys_manager
+ ),
+ ]
+ )
+ .with_bolt12_features(channelmanager::provided_bolt12_invoice_features(&UserConfig::default()))
+ .unwrap();
+ RouteParameters::from_payment_params_and_value(pay_params, amt_msat)
+ };
+
+ let payment_hash = nodes[0].node.send_spontaneous_payment_with_retry(Some(keysend_preimage), RecipientOnionFields::spontaneous_empty(), PaymentId(keysend_preimage.0), route_params, Retry::Attempts(0)).unwrap();
+ check_added_monitors!(nodes[0], 2);
+
+ let expected_route: &[&[&Node]] = &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]];
+ let mut events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+
+ let ev = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut events);
+ pass_along_path(&nodes[0], expected_route[0], amt_msat, payment_hash.clone(),
+ Some(payment_secret), ev.clone(), false, Some(keysend_preimage));
+
+ let ev = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events);
+ pass_along_path(&nodes[0], expected_route[1], amt_msat, payment_hash.clone(),
+ Some(payment_secret), ev.clone(), true, Some(keysend_preimage));
+ claim_payment_along_route(&nodes[0], expected_route, false, keysend_preimage);
+}
let mut events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), if close_during_reload { 2 } else { 1 });
expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000),
- None, close_during_reload, false);
+ None, close_during_reload, false, false);
if close_during_reload {
match events[0] {
Event::ChannelClosed { .. } => {},
/// For HTLCs received by LDK, these will ultimately bubble back up as
/// [`RecipientOnionFields::custom_tlvs`].
custom_tlvs: Vec<(u64, Vec<u8>)>,
+ /// Set if this HTLC is the final hop in a multi-hop blinded path.
+ requires_blinded_error: bool,
},
}
match self {
Self::Forward { blinded: Some(BlindedForward { failure, .. }), .. } => Some(*failure),
Self::Receive { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
+ Self::ReceiveKeysend { requires_blinded_error: true, .. } => Some(BlindedFailure::FromBlindedNode),
_ => None,
}
}
(incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data },
Some(payment_data), phantom_shared_secret, onion_fields)
},
- PendingHTLCRouting::ReceiveKeysend { payment_data, payment_preimage, payment_metadata, incoming_cltv_expiry, custom_tlvs } => {
+ PendingHTLCRouting::ReceiveKeysend {
+ payment_data, payment_preimage, payment_metadata,
+ incoming_cltv_expiry, custom_tlvs, requires_blinded_error: _
+ } => {
let onion_fields = RecipientOnionFields {
payment_secret: payment_data.as_ref().map(|data| data.payment_secret),
payment_metadata,
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage,
forwarded_htlc_value_msat: Option<u64>, skimmed_fee_msat: Option<u64>, from_onchain: bool,
startup_replay: bool, next_channel_counterparty_node_id: Option<PublicKey>,
- next_channel_outpoint: OutPoint, next_channel_id: ChannelId,
+ next_channel_outpoint: OutPoint, next_channel_id: ChannelId, next_user_channel_id: Option<u128>,
) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_channel_id = hop_data.channel_id;
+ let prev_user_channel_id = hop_data.user_channel_id;
let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data);
#[cfg(debug_assertions)]
let claiming_chan_funding_outpoint = hop_data.outpoint;
- #[cfg(debug_assertions)]
- let claiming_channel_id = hop_data.channel_id;
let res = self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat, definitely_duplicate| {
let chan_to_release =
BackgroundEvent::MonitorUpdatesComplete {
channel_id, ..
} =>
- *channel_id == claiming_channel_id,
+ *channel_id == prev_channel_id,
}
}), "{:?}", *background_events);
}
"skimmed_fee_msat must always be included in total_fee_earned_msat");
Some(MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
event: events::Event::PaymentForwarded {
- total_fee_earned_msat,
- claim_from_onchain_tx: from_onchain,
prev_channel_id: Some(prev_channel_id),
next_channel_id: Some(next_channel_id),
- outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
+ prev_user_channel_id,
+ next_user_channel_id,
+ total_fee_earned_msat,
skimmed_fee_msat,
+ claim_from_onchain_tx: from_onchain,
+ outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
},
downstream_counterparty_and_funding_outpoint: chan_to_release,
})
// happening and return an error. N.B. that we create channel with an outbound SCID of zero so
// that we can delay allocating the SCID until after we're sure that the checks below will
// succeed.
- let mut channel = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
+ let res = match peer_state.inbound_channel_request_by_id.remove(temporary_channel_id) {
Some(unaccepted_channel) => {
let best_block_height = self.best_block.read().unwrap().height;
InboundV1Channel::new(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features,
&unaccepted_channel.open_channel_msg, user_channel_id, &self.default_configuration, best_block_height,
- &self.logger, accept_0conf).map_err(|e| {
- let err_str = e.to_string();
- log_error!(logger, "{}", err_str);
-
- APIError::ChannelUnavailable { err: err_str }
- })
- }
+ &self.logger, accept_0conf).map_err(|err| MsgHandleErrInternal::from_chan_no_close(err, *temporary_channel_id))
+ },
_ => {
let err_str = "No such channel awaiting to be accepted.".to_owned();
log_error!(logger, "{}", err_str);
- Err(APIError::APIMisuseError { err: err_str })
+ return Err(APIError::APIMisuseError { err: err_str });
}
- }?;
+ };
- if accept_0conf {
- // This should have been correctly configured by the call to InboundV1Channel::new.
- debug_assert!(channel.context.minimum_depth().unwrap() == 0);
- } else if channel.context.get_channel_type().requires_zero_conf() {
- let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage{
- msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
+ match res {
+ Err(err) => {
+ mem::drop(peer_state_lock);
+ mem::drop(per_peer_state);
+ match handle_error!(self, Result::<(), MsgHandleErrInternal>::Err(err), *counterparty_node_id) {
+ Ok(_) => unreachable!("`handle_error` only returns Err as we've passed in an Err"),
+ Err(e) => {
+ return Err(APIError::ChannelUnavailable { err: e.err });
+ },
}
- };
- peer_state.pending_msg_events.push(send_msg_err_event);
- let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
- log_error!(logger, "{}", err_str);
+ }
+ Ok(mut channel) => {
+ if accept_0conf {
+ // This should have been correctly configured by the call to InboundV1Channel::new.
+ debug_assert!(channel.context.minimum_depth().unwrap() == 0);
+ } else if channel.context.get_channel_type().requires_zero_conf() {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.context.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
+ }
+ };
+ peer_state.pending_msg_events.push(send_msg_err_event);
+ let err_str = "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned();
+ log_error!(logger, "{}", err_str);
- return Err(APIError::APIMisuseError { err: err_str });
- } else {
- // If this peer already has some channels, a new channel won't increase our number of peers
- // with unfunded channels, so as long as we aren't over the maximum number of unfunded
- // channels per-peer we can accept channels from a peer with existing ones.
- if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
- let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.context.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage{
- msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
- }
- };
- peer_state.pending_msg_events.push(send_msg_err_event);
- let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
- log_error!(logger, "{}", err_str);
+ return Err(APIError::APIMisuseError { err: err_str });
+ } else {
+ // If this peer already has some channels, a new channel won't increase our number of peers
+ // with unfunded channels, so as long as we aren't over the maximum number of unfunded
+ // channels per-peer we can accept channels from a peer with existing ones.
+ if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.context.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
+ }
+ };
+ peer_state.pending_msg_events.push(send_msg_err_event);
+ let err_str = "Too many peers with unfunded channels, refusing to accept new ones".to_owned();
+ log_error!(logger, "{}", err_str);
- return Err(APIError::APIMisuseError { err: err_str });
- }
- }
+ return Err(APIError::APIMisuseError { err: err_str });
+ }
+ }
- // Now that we know we have a channel, assign an outbound SCID alias.
- let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
- channel.context.set_outbound_scid_alias(outbound_scid_alias);
+ // Now that we know we have a channel, assign an outbound SCID alias.
+ let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+ channel.context.set_outbound_scid_alias(outbound_scid_alias);
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
- node_id: channel.context.get_counterparty_node_id(),
- msg: channel.accept_inbound_channel(),
- });
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+ node_id: channel.context.get_counterparty_node_id(),
+ msg: channel.accept_inbound_channel(),
+ });
- peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
+ peer_state.channel_by_id.insert(temporary_channel_id.clone(), ChannelPhase::UnfundedInboundV1(channel));
- Ok(())
+ Ok(())
+ },
+ }
}
/// Gets the number of peers which match the given filter and do not have any funded, outbound,
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let funding_txo;
+ let next_user_channel_id;
let (htlc_source, forwarded_htlc_value, skimmed_fee_msat) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
// outbound HTLC is claimed. This is guaranteed to all complete before we
// process the RAA as messages are processed from single peers serially.
funding_txo = chan.context.get_funding_txo().expect("We won't accept a fulfill until funded");
+ next_user_channel_id = chan.context.get_user_id();
res
} else {
return try_chan_phase_entry!(self, Err(ChannelError::Close(
};
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(),
Some(forwarded_htlc_value), skimmed_fee_msat, false, false, Some(*counterparty_node_id),
- funding_txo, msg.channel_id
+ funding_txo, msg.channel_id, Some(next_user_channel_id),
);
Ok(())
log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage);
self.claim_funds_internal(htlc_update.source, preimage,
htlc_update.htlc_value_satoshis.map(|v| v * 1000), None, true,
- false, counterparty_node_id, funding_outpoint, channel_id);
+ false, counterparty_node_id, funding_outpoint, channel_id, None);
} else {
log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash);
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id };
self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
}
},
- MonitorEvent::HolderForceClosed(_funding_outpoint) => {
+ MonitorEvent::HolderForceClosed(_) | MonitorEvent::HolderForceClosedWithInfo { .. } => {
let counterparty_node_id_opt = match counterparty_node_id {
Some(cp_id) => Some(cp_id),
None => {
let pending_msg_events = &mut peer_state.pending_msg_events;
if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) {
if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) {
- failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed));
+ let reason = if let MonitorEvent::HolderForceClosedWithInfo { reason, .. } = monitor_event {
+ reason
+ } else {
+ ClosureReason::HolderForceClosed
+ };
+ failed_channels.push(chan.context.force_shutdown(false, reason.clone()));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
pending_msg_events.push(events::MessageSendEvent::HandleError {
node_id: chan.context.get_counterparty_node_id(),
action: msgs::ErrorAction::DisconnectPeer {
- msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: "Channel force-closed".to_owned() })
+ msg: Some(msgs::ErrorMessage { channel_id: chan.context.channel_id(), data: reason.to_string() })
},
});
}
.absolute_expiry(absolute_expiry)
.path(path);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop($self);
+
let expiration = StaleExpiration::AbsoluteTimeout(absolute_expiry);
$self.pending_outbound_payments
.add_new_awaiting_invoice(
/// Errors if:
/// - a duplicate `payment_id` is provided given the caveats in the aforementioned link,
/// - the provided parameters are invalid for the offer,
+ /// - the offer is for an unsupported chain, or
/// - the parameterized [`Router`] is unable to create a blinded reply path for the invoice
/// request.
///
let invoice_request = builder.build_and_sign()?;
let reply_path = self.create_blinded_path().map_err(|_| Bolt12SemanticError::MissingPaths)?;
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
let expiration = StaleExpiration::TimerTicks(1);
self.pending_outbound_payments
.add_new_awaiting_invoice(
///
/// # Errors
///
- /// Errors if the parameterized [`Router`] is unable to create a blinded payment path or reply
- /// path for the invoice.
+ /// Errors if:
+ /// - the refund is for an unsupported chain, or
+ /// - the parameterized [`Router`] is unable to create a blinded payment path or reply path for
+ /// the invoice.
///
/// [`Bolt12Invoice`]: crate::offers::invoice::Bolt12Invoice
pub fn request_refund_payment(&self, refund: &Refund) -> Result<(), Bolt12SemanticError> {
let amount_msats = refund.amount_msats();
let relative_expiry = DEFAULT_RELATIVE_EXPIRY.as_secs() as u32;
+ if refund.chain() != self.chain_hash {
+ return Err(Bolt12SemanticError::UnsupportedChain);
+ }
+
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
match self.create_inbound_payment(Some(amount_msats), relative_expiry, None) {
Ok((payment_hash, payment_secret)) => {
let payment_paths = self.create_blinded_payment_paths(amount_msats, payment_secret)
}
fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
- let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
-
match &msg.data as &str {
"cannot co-op close channel w/ active htlcs"|
"link failed to shutdown" =>
// We're not going to bother handling this in a sensible way, instead simply
// repeating the Shutdown message on repeat until morale improves.
if !msg.channel_id.is_zero() {
- let per_peer_state = self.per_peer_state.read().unwrap();
- let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
- if peer_state_mutex_opt.is_none() { return; }
- let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
- if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
- if let Some(msg) = chan.get_outbound_shutdown() {
- peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg,
- });
- }
- peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: *counterparty_node_id,
- action: msgs::ErrorAction::SendWarningMessage {
- msg: msgs::WarningMessage {
- channel_id: msg.channel_id,
- data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
- },
- log_level: Level::Trace,
+ PersistenceNotifierGuard::optionally_notify(
+ self,
+ || -> NotifyOption {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+ if peer_state_mutex_opt.is_none() { return NotifyOption::SkipPersistNoEvents; }
+ let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
+ if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get(&msg.channel_id) {
+ if let Some(msg) = chan.get_outbound_shutdown() {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ }
+ peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: *counterparty_node_id,
+ action: msgs::ErrorAction::SendWarningMessage {
+ msg: msgs::WarningMessage {
+ channel_id: msg.channel_id,
+ data: "You appear to be exhibiting LND bug 6039, we'll keep sending you shutdown messages until you handle them correctly".to_owned()
+ },
+ log_level: Level::Trace,
+ }
+ });
+ // This can happen in a fairly tight loop, so we absolutely cannot trigger
+ // a `ChannelManager` write here.
+ return NotifyOption::SkipPersistHandleEvents;
}
- });
- }
+ NotifyOption::SkipPersistNoEvents
+ }
+ );
}
return;
}
_ => {}
}
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
+
if msg.channel_id.is_zero() {
let channel_ids: Vec<ChannelId> = {
let per_peer_state = self.per_peer_state.read().unwrap();
},
(2, ReceiveKeysend) => {
(0, payment_preimage, required),
+ (1, requires_blinded_error, (default_value, false)),
(2, incoming_cltv_expiry, required),
(3, payment_metadata, option),
(4, payment_data, option), // Added in 0.0.116
// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
// channel is closed we just assume that it probably came from an on-chain claim.
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), None,
- downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id);
+ downstream_closed, true, downstream_node_id, downstream_funding,
+ downstream_channel_id, None
+ );
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
//! for more info).
//! - `Keysend` - send funds to a node without an invoice
//! (see the [`Keysend` feature assignment proposal](https://github.com/lightning/bolts/issues/605#issuecomment-606679798) for more information).
+//! - `Trampoline` - supports receiving and forwarding Trampoline payments
+//! (see the [`Trampoline` feature proposal](https://github.com/lightning/bolts/pull/836) for more information).
//!
//! LDK knows about the following features, but does not support them:
//! - `AnchorsNonzeroFeeHtlcTx` - the initial version of anchor outputs, which was later found to be
ChannelType | SCIDPrivacy,
// Byte 6
ZeroConf,
+ // Byte 7
+ Trampoline,
]);
define_context!(NodeContext, [
// Byte 0
ChannelType | SCIDPrivacy,
// Byte 6
ZeroConf | Keysend,
+ // Byte 7
+ Trampoline,
]);
define_context!(ChannelContext, []);
define_context!(Bolt11InvoiceContext, [
,
// Byte 6
PaymentMetadata,
+ // Byte 7
+ Trampoline,
]);
define_context!(OfferContext, []);
define_context!(InvoiceRequestContext, []);
define_feature!(55, Keysend, [NodeContext],
"Feature flags for keysend payments.", set_keysend_optional, set_keysend_required,
supports_keysend, requires_keysend);
+ define_feature!(57, Trampoline, [InitContext, NodeContext, Bolt11InvoiceContext],
+ "Feature flags for Trampoline routing.", set_trampoline_routing_optional, set_trampoline_routing_required,
+ supports_trampoline_routing, requires_trampoline_routing);
// Note: update the module-level docs when a new feature bit is added!
#[cfg(test)]
}
}
+/// Returns the total fee earned by this HTLC forward, in msat.
pub fn expect_payment_forwarded<CM: AChannelManager, H: NodeHolder<CM=CM>>(
event: Event, node: &H, prev_node: &H, next_node: &H, expected_fee: Option<u64>,
expected_extra_fees_msat: Option<u64>, upstream_force_closed: bool,
- downstream_force_closed: bool
-) {
+ downstream_force_closed: bool, allow_1_msat_fee_overpay: bool,
+) -> Option<u64> {
match event {
Event::PaymentForwarded {
- total_fee_earned_msat, prev_channel_id, claim_from_onchain_tx, next_channel_id,
- outbound_amount_forwarded_msat: _, skimmed_fee_msat
+ prev_channel_id, next_channel_id, prev_user_channel_id, next_user_channel_id,
+ total_fee_earned_msat, skimmed_fee_msat, claim_from_onchain_tx, ..
} => {
- assert_eq!(total_fee_earned_msat, expected_fee);
+ if allow_1_msat_fee_overpay {
+ // Aggregating fees for blinded paths may result in a rounding error, causing slight
+ // overpayment in fees.
+ let actual_fee = total_fee_earned_msat.unwrap();
+ let expected_fee = expected_fee.unwrap();
+ assert!(actual_fee == expected_fee || actual_fee == expected_fee + 1);
+ } else {
+ assert_eq!(total_fee_earned_msat, expected_fee);
+ }
// Check that the (knowingly) withheld amount is always less or equal to the expected
// overpaid amount.
assert!(skimmed_fee_msat == expected_extra_fees_msat);
if !upstream_force_closed {
// Is the event prev_channel_id in one of the channels between the two nodes?
- assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id == prev_node.node().get_our_node_id() && x.channel_id == prev_channel_id.unwrap()));
+ assert!(node.node().list_channels().iter().any(|x|
+ x.counterparty.node_id == prev_node.node().get_our_node_id() &&
+ x.channel_id == prev_channel_id.unwrap() &&
+ x.user_channel_id == prev_user_channel_id.unwrap()
+ ));
}
// We check for force closures since a force closed channel is removed from the
// node's channel list
if !downstream_force_closed {
- assert!(node.node().list_channels().iter().any(|x| x.counterparty.node_id == next_node.node().get_our_node_id() && x.channel_id == next_channel_id.unwrap()));
+ // As documented, `next_user_channel_id` will only be `Some` if we didn't settle via an
+ // onchain transaction, just as the `total_fee_earned_msat` field. Rather than
+ // introducing yet another variable, we use the latter's state as a flag to detect
+ // this and only check if it's `Some`.
+ if total_fee_earned_msat.is_none() {
+ assert!(node.node().list_channels().iter().any(|x|
+ x.counterparty.node_id == next_node.node().get_our_node_id() &&
+ x.channel_id == next_channel_id.unwrap()
+ ));
+ } else {
+ assert!(node.node().list_channels().iter().any(|x|
+ x.counterparty.node_id == next_node.node().get_our_node_id() &&
+ x.channel_id == next_channel_id.unwrap() &&
+ x.user_channel_id == next_user_channel_id.unwrap()
+ ));
+ }
}
assert_eq!(claim_from_onchain_tx, downstream_force_closed);
+ total_fee_earned_msat
},
_ => panic!("Unexpected event"),
}
assert_eq!(events.len(), 1);
$crate::ln::functional_test_utils::expect_payment_forwarded(
events.pop().unwrap(), &$node, &$prev_node, &$next_node, $expected_fee, None,
- $upstream_force_closed, $downstream_force_closed
+ $upstream_force_closed, $downstream_force_closed, false
);
}
}
}
}
-pub fn do_pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option<PaymentSecret>, ev: MessageSendEvent, payment_claimable_expected: bool, clear_recipient_events: bool, expected_preimage: Option<PaymentPreimage>, is_probe: bool) -> Option<Event> {
+pub struct PassAlongPathArgs<'a, 'b, 'c, 'd> {
+ pub origin_node: &'a Node<'b, 'c, 'd>,
+ pub expected_path: &'a [&'a Node<'b, 'c, 'd>],
+ pub recv_value: u64,
+ pub payment_hash: PaymentHash,
+ pub payment_secret: Option<PaymentSecret>,
+ pub event: MessageSendEvent,
+ pub payment_claimable_expected: bool,
+ pub clear_recipient_events: bool,
+ pub expected_preimage: Option<PaymentPreimage>,
+ pub is_probe: bool,
+}
+
+impl<'a, 'b, 'c, 'd> PassAlongPathArgs<'a, 'b, 'c, 'd> {
+ pub fn new(
+ origin_node: &'a Node<'b, 'c, 'd>, expected_path: &'a [&'a Node<'b, 'c, 'd>], recv_value: u64,
+ payment_hash: PaymentHash, event: MessageSendEvent,
+ ) -> Self {
+ Self {
+ origin_node, expected_path, recv_value, payment_hash, payment_secret: None, event,
+ payment_claimable_expected: true, clear_recipient_events: true, expected_preimage: None,
+ is_probe: false,
+ }
+ }
+ pub fn without_clearing_recipient_events(mut self) -> Self {
+ self.clear_recipient_events = false;
+ self
+ }
+ pub fn is_probe(mut self) -> Self {
+ self.payment_claimable_expected = false;
+ self.is_probe = true;
+ self
+ }
+ pub fn without_claimable_event(mut self) -> Self {
+ self.payment_claimable_expected = false;
+ self
+ }
+ pub fn with_payment_secret(mut self, payment_secret: PaymentSecret) -> Self {
+ self.payment_secret = Some(payment_secret);
+ self
+ }
+ pub fn with_payment_preimage(mut self, payment_preimage: PaymentPreimage) -> Self {
+ self.expected_preimage = Some(payment_preimage);
+ self
+ }
+}
+
+pub fn do_pass_along_path<'a, 'b, 'c>(args: PassAlongPathArgs) -> Option<Event> {
+ let PassAlongPathArgs {
+ origin_node, expected_path, recv_value, payment_hash: our_payment_hash,
+ payment_secret: our_payment_secret, event: ev, payment_claimable_expected,
+ clear_recipient_events, expected_preimage, is_probe
+ } = args;
+
let mut payment_event = SendEvent::from_event(ev);
let mut prev_node = origin_node;
let mut event = None;
}
pub fn pass_along_path<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_path: &[&Node<'a, 'b, 'c>], recv_value: u64, our_payment_hash: PaymentHash, our_payment_secret: Option<PaymentSecret>, ev: MessageSendEvent, payment_claimable_expected: bool, expected_preimage: Option<PaymentPreimage>) -> Option<Event> {
- do_pass_along_path(origin_node, expected_path, recv_value, our_payment_hash, our_payment_secret, ev, payment_claimable_expected, true, expected_preimage, false)
+ let mut args = PassAlongPathArgs::new(origin_node, expected_path, recv_value, our_payment_hash, ev);
+ if !payment_claimable_expected {
+ args = args.without_claimable_event();
+ }
+ if let Some(payment_secret) = our_payment_secret {
+ args = args.with_payment_secret(payment_secret);
+ }
+ if let Some(payment_preimage) = expected_preimage {
+ args = args.with_payment_preimage(payment_preimage);
+ }
+ do_pass_along_path(args)
}
pub fn send_probe_along_route<'a, 'b, 'c>(origin_node: &Node<'a, 'b, 'c>, expected_route: &[&[&Node<'a, 'b, 'c>]]) {
for path in expected_route.iter() {
let ev = remove_first_msg_event_to_node(&path[0].node.get_our_node_id(), &mut events);
- do_pass_along_path(origin_node, path, 0, PaymentHash([0_u8; 32]), None, ev, false, false, None, true);
+ do_pass_along_path(PassAlongPathArgs::new(origin_node, path, 0, PaymentHash([0_u8; 32]), ev)
+ .is_probe()
+ .without_clearing_recipient_events());
+
let nodes_to_fail_payment: Vec<_> = vec![origin_node].into_iter().chain(path.iter().cloned()).collect();
fail_payment_along_path(nodes_to_fail_payment.as_slice());
pub expected_min_htlc_overpay: Vec<u32>,
pub skip_last: bool,
pub payment_preimage: PaymentPreimage,
+ // Allow forwarding nodes to have taken 1 msat more fee than expected based on the downstream
+ // fulfill amount.
+ //
+ // Necessary because our test utils calculate the expected fee for an intermediate node based on
+	// the amount that was claimed in their downstream peer's fulfill, but blinded intermediate nodes
+ // calculate their fee based on the inbound amount from their upstream peer, causing a difference
+ // in rounding.
+ pub allow_1_msat_fee_overpay: bool,
}
impl<'a, 'b, 'c, 'd> ClaimAlongRouteArgs<'a, 'b, 'c, 'd> {
Self {
origin_node, expected_paths, expected_extra_fees: vec![0; expected_paths.len()],
expected_min_htlc_overpay: vec![0; expected_paths.len()], skip_last: false, payment_preimage,
+ allow_1_msat_fee_overpay: false,
}
}
pub fn skip_last(mut self, skip_last: bool) -> Self {
self.expected_min_htlc_overpay = extra_fees;
self
}
+ pub fn allow_1_msat_fee_overpay(mut self) -> Self {
+ self.allow_1_msat_fee_overpay = true;
+ self
+ }
}
pub fn pass_claimed_payment_along_route<'a, 'b, 'c, 'd>(args: ClaimAlongRouteArgs) -> u64 {
let ClaimAlongRouteArgs {
origin_node, expected_paths, expected_extra_fees, expected_min_htlc_overpay, skip_last,
- payment_preimage: our_payment_preimage
+ payment_preimage: our_payment_preimage, allow_1_msat_fee_overpay,
} = args;
let claim_event = expected_paths[0].last().unwrap().node.get_and_clear_pending_events();
assert_eq!(claim_event.len(), 1);
+ #[allow(unused)]
+ let mut fwd_amt_msat = 0;
match claim_event[0] {
Event::PaymentClaimed {
purpose: PaymentPurpose::SpontaneousPayment(preimage),
assert_eq!(htlcs.len(), expected_paths.len()); // One per path.
assert_eq!(htlcs.iter().map(|h| h.value_msat).sum::<u64>(), amount_msat);
expected_paths.iter().zip(htlcs).for_each(|(path, htlc)| check_claimed_htlc_channel(origin_node, path, htlc));
+ fwd_amt_msat = amount_msat;
},
Event::PaymentClaimed {
purpose: PaymentPurpose::InvoicePayment { .. },
assert_eq!(htlcs.len(), expected_paths.len()); // One per path.
assert_eq!(htlcs.iter().map(|h| h.value_msat).sum::<u64>(), amount_msat);
expected_paths.iter().zip(htlcs).for_each(|(path, htlc)| check_claimed_htlc_channel(origin_node, path, htlc));
+ fwd_amt_msat = amount_msat;
}
_ => panic!(),
}
per_path_msgs.push(msgs_from_ev!(&events[0]));
} else {
for expected_path in expected_paths.iter() {
- // For MPP payments, we always want the message to the first node in the path.
- let ev = remove_first_msg_event_to_node(&expected_path[0].node.get_our_node_id(), &mut events);
+ // For MPP payments, we want the fulfill message from the payee to the penultimate hop in the
+ // path.
+ let penultimate_hop_node_id = expected_path.iter().rev().skip(1).next()
+ .map(|n| n.node.get_our_node_id())
+ .unwrap_or(origin_node.node.get_our_node_id());
+ let ev = remove_first_msg_event_to_node(&penultimate_hop_node_id, &mut events);
per_path_msgs.push(msgs_from_ev!(&ev));
}
}
{
$node.node.handle_update_fulfill_htlc(&$prev_node.node.get_our_node_id(), &next_msgs.as_ref().unwrap().0);
let mut fee = {
- let per_peer_state = $node.node.per_peer_state.read().unwrap();
- let peer_state = per_peer_state.get(&$prev_node.node.get_our_node_id())
- .unwrap().lock().unwrap();
- let channel = peer_state.channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap();
- if let Some(prev_config) = channel.context().prev_config() {
- prev_config.forwarding_fee_base_msat
- } else {
- channel.context().config().forwarding_fee_base_msat
- }
+ let (base_fee, prop_fee) = {
+ let per_peer_state = $node.node.per_peer_state.read().unwrap();
+ let peer_state = per_peer_state.get(&$prev_node.node.get_our_node_id())
+ .unwrap().lock().unwrap();
+ let channel = peer_state.channel_by_id.get(&next_msgs.as_ref().unwrap().0.channel_id).unwrap();
+ if let Some(prev_config) = channel.context().prev_config() {
+ (prev_config.forwarding_fee_base_msat as u64,
+ prev_config.forwarding_fee_proportional_millionths as u64)
+ } else {
+ (channel.context().config().forwarding_fee_base_msat as u64,
+ channel.context().config().forwarding_fee_proportional_millionths as u64)
+ }
+ };
+ ((fwd_amt_msat * prop_fee / 1_000_000) + base_fee) as u32
};
let mut expected_extra_fee = None;
}
let mut events = $node.node.get_and_clear_pending_events();
assert_eq!(events.len(), 1);
- expect_payment_forwarded(events.pop().unwrap(), *$node, $next_node, $prev_node,
- Some(fee as u64), expected_extra_fee, false, false);
- expected_total_fee_msat += fee as u64;
+ let actual_fee = expect_payment_forwarded(events.pop().unwrap(), *$node, $next_node, $prev_node,
+ Some(fee as u64), expected_extra_fee, false, false, allow_1_msat_fee_overpay);
+ expected_total_fee_msat += actual_fee.unwrap();
+ fwd_amt_msat += actual_fee.unwrap();
check_added_monitors!($node, 1);
let new_next_msgs = if $new_msgs {
let events = $node.node.get_and_clear_pending_msg_events();
}
check_added_monitors!(nodes[4], 1);
test_txn_broadcast(&nodes[4], &chan_4, None, HTLCType::SUCCESS);
- check_closed_event!(nodes[4], 1, ClosureReason::HolderForceClosed, [nodes[3].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[4], 1, ClosureReason::HTLCsTimedOut, [nodes[3].node.get_our_node_id()], 100000);
mine_transaction(&nodes[4], &node_txn[0]);
check_preimage_claim(&nodes[4], &node_txn);
assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
Ok(ChannelMonitorUpdateStatus::Completed));
- check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[3], 1, ClosureReason::HTLCsTimedOut, [nodes[4].node.get_our_node_id()], 100000);
}
#[test]
test_txn_broadcast(&nodes[1], &chan, None, if use_dust { HTLCType::NONE } else { HTLCType::SUCCESS });
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 100000);
}
fn do_htlc_claim_current_remote_commitment_only(use_dust: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
}
fn do_htlc_claim_previous_remote_commitment_only(use_dust: bool, check_revoke_no_close: bool) {
test_txn_broadcast(&nodes[0], &chan, None, HTLCType::NONE);
check_closed_broadcast!(nodes[0], true);
check_added_monitors!(nodes[0], 1);
- check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
+ check_closed_event!(nodes[0], 1, ClosureReason::HTLCsTimedOut, [nodes[1].node.get_our_node_id()], 100000);
} else {
expect_payment_failed!(nodes[0], our_payment_hash, true);
}
let height = HTLC_TIMEOUT_BROADCAST + 1;
connect_blocks(&nodes[0], height - nodes[0].best_block_info().1);
check_closed_broadcast(&nodes[0], 1, true);
- check_closed_event!(&nodes[0], 1, ClosureReason::HolderForceClosed, false,
+ check_closed_event!(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false,
[nodes[1].node.get_our_node_id()], 100000);
watchtower_alice.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, vec![bob_state_y.clone()]), height);
check_added_monitors(&nodes[0], 1);
do_test_funding_and_commitment_tx_confirm_same_block(false);
do_test_funding_and_commitment_tx_confirm_same_block(true);
}
+
+#[test]
+fn test_accept_inbound_channel_errors_queued() {
+	// For manually accepted inbound channels, tests that a close error is correctly handled
+	// and the channel fails for the initiator.
+	let mut config0 = test_default_channel_config();
+	let mut config1 = config0.clone();
+	// Deliberately mismatch the delay limits: node 1 tolerates up to 1000 blocks of
+	// counterparty `to_self_delay`, but node 0 demands 2000, so acceptance must fail.
+	config1.channel_handshake_limits.their_to_self_delay = 1000;
+	config1.manually_accept_inbound_channels = true;
+	config0.channel_handshake_config.our_to_self_delay = 2000;
+
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config0), Some(config1)]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
+	let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+
+	nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+	let events = nodes[1].node.get_and_clear_pending_events();
+	match events[0] {
+		Event::OpenChannelRequest { temporary_channel_id, .. } => {
+			// Manual acceptance should error out due to the to_self_delay mismatch above.
+			match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 23) {
+				Err(APIError::ChannelUnavailable { err: _ }) => (),
+				_ => panic!(),
+			}
+		}
+		_ => panic!("Unexpected event"),
+	}
+	// The rejection must be surfaced to the initiator as a queued error message for the
+	// right (temporary) channel id.
+	assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
+		open_channel_msg.common_fields.temporary_channel_id);
+}
--- /dev/null
+// This file is Copyright its original authors, visible in version control
+// history.
+//
+// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
+// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
+// You may not use this file except in accordance with one or both of these
+// licenses.
+
+use crate::io_extras::sink;
+use crate::prelude::*;
+use core::ops::Deref;
+
+use bitcoin::blockdata::constants::WITNESS_SCALE_FACTOR;
+use bitcoin::consensus::Encodable;
+use bitcoin::policy::MAX_STANDARD_TX_WEIGHT;
+use bitcoin::{
+ absolute::LockTime as AbsoluteLockTime, OutPoint, Sequence, Transaction, TxIn, TxOut,
+};
+
+use crate::chain::chaininterface::fee_for_weight;
+use crate::events::bump_transaction::{BASE_INPUT_WEIGHT, EMPTY_SCRIPT_SIG_WEIGHT};
+use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
+use crate::ln::msgs::SerialId;
+use crate::ln::{msgs, ChannelId};
+use crate::sign::EntropySource;
+use crate::util::ser::TransactionU16LenLimited;
+
+/// The number of received `tx_add_input` messages during a negotiation at which point the
+/// negotiation MUST be failed.
+// Limit taken from the BOLT-2 interactive transaction construction requirements.
+const MAX_RECEIVED_TX_ADD_INPUT_COUNT: u16 = 4096;
+
+/// The number of received `tx_add_output` messages during a negotiation at which point the
+/// negotiation MUST be failed.
+// Limit taken from the BOLT-2 interactive transaction construction requirements.
+const MAX_RECEIVED_TX_ADD_OUTPUT_COUNT: u16 = 4096;
+
+/// The number of inputs or outputs that the state machine can have, before it MUST fail the
+/// negotiation.
+const MAX_INPUTS_OUTPUTS_COUNT: usize = 252;
+
+/// Convenience queries on a `SerialId`: per the interactive-tx protocol, serial ids chosen by
+/// the channel initiator are even and those chosen by the non-initiator are odd.
+trait SerialIdExt {
+	fn is_for_initiator(&self) -> bool;
+	fn is_for_non_initiator(&self) -> bool;
+}
+
+impl SerialIdExt for SerialId {
+	fn is_for_initiator(&self) -> bool {
+		// Even serial ids belong to the initiator.
+		self % 2 == 0
+	}
+
+	fn is_for_non_initiator(&self) -> bool {
+		!self.is_for_initiator()
+	}
+}
+
+/// The reason a transaction negotiation was aborted, mirroring the failure conditions the
+/// interactive transaction construction protocol requires us to enforce.
+#[derive(Debug, Clone, PartialEq)]
+pub enum AbortReason {
+	InvalidStateTransition,
+	UnexpectedCounterpartyMessage,
+	ReceivedTooManyTxAddInputs,
+	ReceivedTooManyTxAddOutputs,
+	IncorrectInputSequenceValue,
+	IncorrectSerialIdParity,
+	SerialIdUnknown,
+	DuplicateSerialId,
+	PrevTxOutInvalid,
+	ExceededMaximumSatsAllowed,
+	ExceededNumberOfInputsOrOutputs,
+	TransactionTooLarge,
+	BelowDustLimit,
+	InvalidOutputScript,
+	InsufficientFees,
+	OutputsValueExceedsInputsValue,
+	InvalidTx,
+}
+
+/// A transaction input paired with the output it spends, so contributed value and script
+/// checks can be performed without re-fetching the previous transaction.
+#[derive(Debug)]
+pub struct TxInputWithPrevOutput {
+	input: TxIn,
+	prev_output: TxOut,
+}
+
+/// Mutable state accumulated over the course of an interactive transaction negotiation:
+/// the inputs/outputs added so far (keyed by serial id), message counters for DoS limits,
+/// and the parameters the final transaction must satisfy.
+#[derive(Debug)]
+struct NegotiationContext {
+	holder_is_initiator: bool,
+	// Counters used to enforce the MAX_RECEIVED_TX_ADD_* protocol limits.
+	received_tx_add_input_count: u16,
+	received_tx_add_output_count: u16,
+	inputs: HashMap<SerialId, TxInputWithPrevOutput>,
+	// Outpoints already spent by an added input, used to reject duplicate spends.
+	prevtx_outpoints: HashSet<OutPoint>,
+	outputs: HashMap<SerialId, TxOut>,
+	tx_locktime: AbsoluteLockTime,
+	feerate_sat_per_kw: u32,
+	// Counterparty channel balance contributing to their input value (e.g. when splicing);
+	// zero for a fresh dual-funded channel.
+	to_remote_value_satoshis: u64,
+}
+
+impl NegotiationContext {
+	fn is_serial_id_valid_for_counterparty(&self, serial_id: &SerialId) -> bool {
+		// A received `SerialId`'s parity must match the role of the counterparty.
+		self.holder_is_initiator == serial_id.is_for_non_initiator()
+	}
+
+	// Combined input + output count, saturating so the limit check below can never overflow.
+	fn total_input_and_output_count(&self) -> usize {
+		self.inputs.len().saturating_add(self.outputs.len())
+	}
+
+	// Iterates over only the inputs the counterparty added, identified by serial id parity.
+	fn counterparty_inputs_contributed(
+		&self,
+	) -> impl Iterator<Item = &TxInputWithPrevOutput> + Clone {
+		self.inputs
+			.iter()
+			.filter(move |(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id))
+			.map(|(_, input_with_prevout)| input_with_prevout)
+	}
+
+	// Iterates over only the outputs the counterparty added, identified by serial id parity.
+	fn counterparty_outputs_contributed(&self) -> impl Iterator<Item = &TxOut> + Clone {
+		self.outputs
+			.iter()
+			.filter(move |(serial_id, _)| self.is_serial_id_valid_for_counterparty(serial_id))
+			.map(|(_, output)| output)
+	}
+
+	// Validates and records a counterparty `tx_add_input`, enforcing the protocol's
+	// parity, rate-limit, sequence, and previous-output rules.
+	fn received_tx_add_input(&mut self, msg: &msgs::TxAddInput) -> Result<(), AbortReason> {
+		// The interactive-txs spec calls for us to fail negotiation if the `prevtx` we receive is
+		// invalid. However, we would not need to account for this explicit negotiation failure
+		// mode here since `PeerManager` would already disconnect the peer if the `prevtx` is
+		// invalid; implicitly ending the negotiation.
+
+		if !self.is_serial_id_valid_for_counterparty(&msg.serial_id) {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - the `serial_id` has the wrong parity
+			return Err(AbortReason::IncorrectSerialIdParity);
+		}
+
+		self.received_tx_add_input_count += 1;
+		if self.received_tx_add_input_count > MAX_RECEIVED_TX_ADD_INPUT_COUNT {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - it has received 4096 `tx_add_input` messages during this negotiation
+			return Err(AbortReason::ReceivedTooManyTxAddInputs);
+		}
+
+		if msg.sequence >= 0xFFFFFFFE {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - `sequence` is set to `0xFFFFFFFE` or `0xFFFFFFFF`
+			return Err(AbortReason::IncorrectInputSequenceValue);
+		}
+
+		let transaction = msg.prevtx.as_transaction();
+		let txid = transaction.txid();
+
+		if let Some(tx_out) = transaction.output.get(msg.prevtx_out as usize) {
+			if !tx_out.script_pubkey.is_witness_program() {
+				// The receiving node:
+				// - MUST fail the negotiation if:
+				//   - the `scriptPubKey` is not a witness program
+				return Err(AbortReason::PrevTxOutInvalid);
+			}
+
+			if !self.prevtx_outpoints.insert(OutPoint { txid, vout: msg.prevtx_out }) {
+				// The receiving node:
+				// - MUST fail the negotiation if:
+				//   - the `prevtx` and `prevtx_vout` are identical to a previously added
+				//     (and not removed) input's
+				return Err(AbortReason::PrevTxOutInvalid);
+			}
+		} else {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - `prevtx_vout` is greater or equal to the number of outputs on `prevtx`
+			return Err(AbortReason::PrevTxOutInvalid);
+		}
+
+		// NOTE(review): `prevtx_out` was already validated above, so this second lookup cannot
+		// fail, and the `prevtx_outpoints.insert` below repeats the insert performed above.
+		// Both are redundant but harmless.
+		let prev_out = if let Some(prev_out) = transaction.output.get(msg.prevtx_out as usize) {
+			prev_out.clone()
+		} else {
+			return Err(AbortReason::PrevTxOutInvalid);
+		};
+		if self.inputs.iter().any(|(serial_id, _)| *serial_id == msg.serial_id) {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - the `serial_id` is already included in the transaction
+			return Err(AbortReason::DuplicateSerialId);
+		}
+		let prev_outpoint = OutPoint { txid, vout: msg.prevtx_out };
+		self.inputs.entry(msg.serial_id).or_insert_with(|| TxInputWithPrevOutput {
+			input: TxIn {
+				previous_output: prev_outpoint.clone(),
+				sequence: Sequence(msg.sequence),
+				..Default::default()
+			},
+			prev_output: prev_out,
+		});
+		self.prevtx_outpoints.insert(prev_outpoint);
+		Ok(())
+	}
+
+	// Removes a counterparty-added input; the counterparty may only remove its own inputs.
+	fn received_tx_remove_input(&mut self, msg: &msgs::TxRemoveInput) -> Result<(), AbortReason> {
+		if !self.is_serial_id_valid_for_counterparty(&msg.serial_id) {
+			return Err(AbortReason::IncorrectSerialIdParity);
+		}
+
+		self.inputs
+			.remove(&msg.serial_id)
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - the input or output identified by the `serial_id` was not added by the sender
+			//   - the `serial_id` does not correspond to a currently added input
+			.ok_or(AbortReason::SerialIdUnknown)
+			.map(|_| ())
+	}
+
+	// Validates and records a counterparty `tx_add_output`, enforcing the protocol's
+	// parity, rate-limit, dust, total-value, and script-standardness rules.
+	fn received_tx_add_output(&mut self, msg: &msgs::TxAddOutput) -> Result<(), AbortReason> {
+		// The receiving node:
+		//  - MUST fail the negotiation if:
+		//     - the serial_id has the wrong parity
+		if !self.is_serial_id_valid_for_counterparty(&msg.serial_id) {
+			return Err(AbortReason::IncorrectSerialIdParity);
+		}
+
+		self.received_tx_add_output_count += 1;
+		if self.received_tx_add_output_count > MAX_RECEIVED_TX_ADD_OUTPUT_COUNT {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - it has received 4096 `tx_add_output` messages during this negotiation
+			return Err(AbortReason::ReceivedTooManyTxAddOutputs);
+		}
+
+		if msg.sats < msg.script.dust_value().to_sat() {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - the sats amount is less than the dust_limit
+			return Err(AbortReason::BelowDustLimit);
+		}
+
+		// Check that adding this output would not cause the total output value to exceed the total
+		// bitcoin supply.
+		let mut outputs_value: u64 = 0;
+		for output in self.outputs.iter() {
+			outputs_value = outputs_value.saturating_add(output.1.value);
+		}
+		if outputs_value.saturating_add(msg.sats) > TOTAL_BITCOIN_SUPPLY_SATOSHIS {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - the sats amount is greater than 2,100,000,000,000,000 (TOTAL_BITCOIN_SUPPLY_SATOSHIS)
+			return Err(AbortReason::ExceededMaximumSatsAllowed);
+		}
+
+		// The receiving node:
+		//   - MUST accept P2WSH, P2WPKH, P2TR scripts
+		//   - MAY fail the negotiation if script is non-standard
+		//
+		// We can actually be a bit looser than the above as only witness version 0 has special
+		// length-based standardness constraints to match similar consensus rules. All witness scripts
+		// with witness versions V1 and up are always considered standard. Yes, the scripts can be
+		// anyone-can-spend-able, but if our counterparty wants to add an output like that then it's none
+		// of our concern really ¯\_(ツ)_/¯
+		if !msg.script.is_v0_p2wpkh()
+			&& !msg.script.is_v0_p2wsh()
+			&& msg.script.witness_version().map(|v| v.to_num() < 1).unwrap_or(true)
+		{
+			return Err(AbortReason::InvalidOutputScript);
+		}
+
+		if self.outputs.iter().any(|(serial_id, _)| *serial_id == msg.serial_id) {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - the `serial_id` is already included in the transaction
+			return Err(AbortReason::DuplicateSerialId);
+		}
+
+		let output = TxOut { value: msg.sats, script_pubkey: msg.script.clone() };
+		self.outputs.entry(msg.serial_id).or_insert(output);
+		Ok(())
+	}
+
+	// Removes a counterparty-added output; the counterparty may only remove its own outputs.
+	fn received_tx_remove_output(&mut self, msg: &msgs::TxRemoveOutput) -> Result<(), AbortReason> {
+		if !self.is_serial_id_valid_for_counterparty(&msg.serial_id) {
+			return Err(AbortReason::IncorrectSerialIdParity);
+		}
+		if let Some(_) = self.outputs.remove(&msg.serial_id) {
+			Ok(())
+		} else {
+			// The receiving node:
+			// - MUST fail the negotiation if:
+			//   - the input or output identified by the `serial_id` was not added by the sender
+			//   - the `serial_id` does not correspond to a currently added input
+			Err(AbortReason::SerialIdUnknown)
+		}
+	}
+
+	// Records an input we sent in a `tx_add_input`. Our own messages are assumed valid
+	// (the debug_assert documents the in-bounds invariant) so no protocol checks are applied.
+	fn sent_tx_add_input(&mut self, msg: &msgs::TxAddInput) {
+		let tx = msg.prevtx.as_transaction();
+		let input = TxIn {
+			previous_output: OutPoint { txid: tx.txid(), vout: msg.prevtx_out },
+			sequence: Sequence(msg.sequence),
+			..Default::default()
+		};
+		debug_assert!((msg.prevtx_out as usize) < tx.output.len());
+		let prev_output = &tx.output[msg.prevtx_out as usize];
+		self.prevtx_outpoints.insert(input.previous_output.clone());
+		self.inputs.insert(
+			msg.serial_id,
+			TxInputWithPrevOutput { input, prev_output: prev_output.clone() },
+		);
+	}
+
+	// Records an output we sent in a `tx_add_output`.
+	fn sent_tx_add_output(&mut self, msg: &msgs::TxAddOutput) {
+		self.outputs
+			.insert(msg.serial_id, TxOut { value: msg.sats, script_pubkey: msg.script.clone() });
+	}
+
+	// Records the removal of one of our own inputs.
+	// NOTE(review): the corresponding entry in `prevtx_outpoints` is not removed here, so a
+	// removed outpoint cannot be re-added later in the negotiation — confirm this is intended.
+	fn sent_tx_remove_input(&mut self, msg: &msgs::TxRemoveInput) {
+		self.inputs.remove(&msg.serial_id);
+	}
+
+	// Records the removal of one of our own outputs.
+	fn sent_tx_remove_output(&mut self, msg: &msgs::TxRemoveOutput) {
+		self.outputs.remove(&msg.serial_id);
+	}
+
+	// Consumes the negotiated state and performs the final whole-transaction validation
+	// (value balance, size limits, counterparty fee contribution), returning the transaction
+	// with inputs/outputs ordered by serial id as the protocol requires.
+	fn build_transaction(self) -> Result<Transaction, AbortReason> {
+		// The receiving node:
+		// MUST fail the negotiation if:
+
+		// - the peer's total input satoshis is less than their outputs
+		let mut counterparty_inputs_value: u64 = 0;
+		let mut counterparty_outputs_value: u64 = 0;
+		for input in self.counterparty_inputs_contributed() {
+			counterparty_inputs_value =
+				counterparty_inputs_value.saturating_add(input.prev_output.value);
+		}
+		for output in self.counterparty_outputs_contributed() {
+			counterparty_outputs_value = counterparty_outputs_value.saturating_add(output.value);
+		}
+		// ...actually the counterparty might be splicing out, so that their balance also contributes
+		// to the total input value.
+		if counterparty_inputs_value.saturating_add(self.to_remote_value_satoshis)
+			< counterparty_outputs_value
+		{
+			return Err(AbortReason::OutputsValueExceedsInputsValue);
+		}
+
+		// - there are more than 252 inputs
+		// - there are more than 252 outputs
+		if self.inputs.len() > MAX_INPUTS_OUTPUTS_COUNT
+			|| self.outputs.len() > MAX_INPUTS_OUTPUTS_COUNT
+		{
+			return Err(AbortReason::ExceededNumberOfInputsOrOutputs);
+		}
+
+		// TODO: How do we enforce their fees cover the witness without knowing its expected length?
+		const INPUT_WEIGHT: u64 = BASE_INPUT_WEIGHT + EMPTY_SCRIPT_SIG_WEIGHT;
+
+		// - the peer's paid feerate does not meet or exceed the agreed feerate (based on the minimum fee).
+		let counterparty_output_weight_contributed: u64 = self
+			.counterparty_outputs_contributed()
+			.map(|output| {
+				(8 /* value */ + output.script_pubkey.consensus_encode(&mut sink()).unwrap() as u64)
+					* WITNESS_SCALE_FACTOR as u64
+			})
+			.sum();
+		let counterparty_weight_contributed = counterparty_output_weight_contributed
+			+ self.counterparty_inputs_contributed().count() as u64 * INPUT_WEIGHT;
+		let counterparty_fees_contributed =
+			counterparty_inputs_value.saturating_sub(counterparty_outputs_value);
+		let mut required_counterparty_contribution_fee =
+			fee_for_weight(self.feerate_sat_per_kw, counterparty_weight_contributed);
+		if !self.holder_is_initiator {
+			// if is the non-initiator:
+			// 	- the initiator's fees do not cover the common fields (version, segwit marker + flag,
+			// 		input count, output count, locktime)
+			let tx_common_fields_weight =
+				(4 /* version */ + 4 /* locktime */ + 1 /* input count */ + 1 /* output count */) *
+					WITNESS_SCALE_FACTOR as u64 + 2 /* segwit marker + flag */;
+			let tx_common_fields_fee =
+				fee_for_weight(self.feerate_sat_per_kw, tx_common_fields_weight);
+			required_counterparty_contribution_fee += tx_common_fields_fee;
+		}
+		if counterparty_fees_contributed < required_counterparty_contribution_fee {
+			return Err(AbortReason::InsufficientFees);
+		}
+
+		// Inputs and outputs must be sorted by serial_id
+		let mut inputs = self.inputs.into_iter().collect::<Vec<_>>();
+		let mut outputs = self.outputs.into_iter().collect::<Vec<_>>();
+		inputs.sort_unstable_by_key(|(serial_id, _)| *serial_id);
+		outputs.sort_unstable_by_key(|(serial_id, _)| *serial_id);
+
+		let tx_to_validate = Transaction {
+			version: 2,
+			lock_time: self.tx_locktime,
+			input: inputs.into_iter().map(|(_, input)| input.input).collect(),
+			output: outputs.into_iter().map(|(_, output)| output).collect(),
+		};
+		if tx_to_validate.weight().to_wu() > MAX_STANDARD_TX_WEIGHT as u64 {
+			return Err(AbortReason::TransactionTooLarge);
+		}
+
+		Ok(tx_to_validate)
+	}
+}
+
+// The interactive transaction construction protocol allows two peers to collaboratively build a
+// transaction for broadcast.
+//
+// The protocol is turn-based, so we define different states here that we store depending on whose
+// turn it is to send the next message. The states are defined so that their types ensure we only
+// perform actions (only send messages) via defined state transitions that do not violate the
+// protocol.
+//
+// An example of a full negotiation and associated states follows:
+//
+// +------------+ +------------------+---- Holder state after message sent/received ----+
+// | |--(1)- tx_add_input ---->| | SentChangeMsg +
+// | |<-(2)- tx_complete ------| | ReceivedTxComplete +
+// | |--(3)- tx_add_output --->| | SentChangeMsg +
+// | |<-(4)- tx_complete ------| | ReceivedTxComplete +
+// | |--(5)- tx_add_input ---->| | SentChangeMsg +
+// | Holder |<-(6)- tx_add_input -----| Counterparty | ReceivedChangeMsg +
+// | |--(7)- tx_remove_output >| | SentChangeMsg +
+// | |<-(8)- tx_add_output ----| | ReceivedChangeMsg +
+// | |--(9)- tx_complete ----->| | SentTxComplete +
+// | |<-(10) tx_complete ------| | NegotiationComplete +
+// +------------+ +------------------+--------------------------------------------------+
+
+/// Negotiation states that can send & receive `tx_(add|remove)_(input|output)` and `tx_complete`
+// Marker trait implemented by every state type below; transitions are only defined between
+// types implementing it, so illegal moves fail to compile.
+trait State {}
+
+/// Category of states where we have sent some message to the counterparty, and we are waiting for
+/// a response.
+trait SentMsgState: State {
+	fn into_negotiation_context(self) -> NegotiationContext;
+}
+
+/// Category of states that our counterparty has put us in after we receive a message from them.
+trait ReceivedMsgState: State {
+	fn into_negotiation_context(self) -> NegotiationContext;
+}
+
+// This macro is a helper for implementing the above state traits for various states subsequently
+// defined below the macro.
+//
+// The SENT_MSG_STATE / RECEIVED_MSG_STATE forms wrap a `NegotiationContext` and additionally
+// implement the matching category trait; the bare form just defines a newtype implementing
+// `State` around the given inner type.
+macro_rules! define_state {
+	(SENT_MSG_STATE, $state: ident, $doc: expr) => {
+		define_state!($state, NegotiationContext, $doc);
+		impl SentMsgState for $state {
+			fn into_negotiation_context(self) -> NegotiationContext {
+				self.0
+			}
+		}
+	};
+	(RECEIVED_MSG_STATE, $state: ident, $doc: expr) => {
+		define_state!($state, NegotiationContext, $doc);
+		impl ReceivedMsgState for $state {
+			fn into_negotiation_context(self) -> NegotiationContext {
+				self.0
+			}
+		}
+	};
+	($state: ident, $inner: ident, $doc: expr) => {
+		#[doc = $doc]
+		#[derive(Debug)]
+		struct $state($inner);
+		impl State for $state {}
+	};
+}
+
+// The concrete negotiation states. Note that `NegotiationComplete` wraps the final
+// `Transaction` and `NegotiationAborted` wraps the `AbortReason`, while all others wrap the
+// in-progress `NegotiationContext`.
+define_state!(
+	SENT_MSG_STATE,
+	SentChangeMsg,
+	"We have sent a message to the counterparty that has affected our negotiation state."
+);
+define_state!(
+	SENT_MSG_STATE,
+	SentTxComplete,
+	"We have sent a `tx_complete` message and are awaiting the counterparty's."
+);
+define_state!(
+	RECEIVED_MSG_STATE,
+	ReceivedChangeMsg,
+	"We have received a message from the counterparty that has affected our negotiation state."
+);
+define_state!(
+	RECEIVED_MSG_STATE,
+	ReceivedTxComplete,
+	"We have received a `tx_complete` message and the counterparty is awaiting ours."
+);
+define_state!(NegotiationComplete, Transaction, "We have exchanged consecutive `tx_complete` messages with the counterparty and the transaction negotiation is complete.");
+define_state!(
+	NegotiationAborted,
+	AbortReason,
+	"The negotiation has failed and cannot be continued."
+);
+
+type StateTransitionResult<S> = Result<S, AbortReason>;
+
+/// A legal move from one negotiation state to another, driven by `TransitionData`
+/// (typically a reference to the wire message that caused the transition).
+trait StateTransition<NewState: State, TransitionData> {
+	fn transition(self, data: TransitionData) -> StateTransitionResult<NewState>;
+}
+
+// This macro helps define the legal transitions between the states above by implementing
+// the `StateTransition` trait for each of the states that follow this declaration.
+//
+// - SENT_MSG_STATE: receiving a change message moves any sent-state to `ReceivedChangeMsg`,
+//   after validating it via the named `NegotiationContext` method (which can abort).
+// - RECEIVED_MSG_STATE: sending a change message moves any received-state to `SentChangeMsg`;
+//   our own messages are recorded without validation.
+// - TX_COMPLETE: wires up the two-step `tx_complete` handshake, building and validating the
+//   final transaction on the second consecutive `tx_complete`.
+macro_rules! define_state_transitions {
+	(SENT_MSG_STATE, [$(DATA $data: ty, TRANSITION $transition: ident),+]) => {
+		$(
+			impl<S: SentMsgState> StateTransition<ReceivedChangeMsg, $data> for S {
+				fn transition(self, data: $data) -> StateTransitionResult<ReceivedChangeMsg> {
+					let mut context = self.into_negotiation_context();
+					context.$transition(data)?;
+					Ok(ReceivedChangeMsg(context))
+				}
+			}
+		)*
+	};
+	(RECEIVED_MSG_STATE, [$(DATA $data: ty, TRANSITION $transition: ident),+]) => {
+		$(
+			impl<S: ReceivedMsgState> StateTransition<SentChangeMsg, $data> for S {
+				fn transition(self, data: $data) -> StateTransitionResult<SentChangeMsg> {
+					let mut context = self.into_negotiation_context();
+					context.$transition(data);
+					Ok(SentChangeMsg(context))
+				}
+			}
+		)*
+	};
+	(TX_COMPLETE, $from_state: ident, $tx_complete_state: ident) => {
+		impl StateTransition<NegotiationComplete, &msgs::TxComplete> for $tx_complete_state {
+			fn transition(self, _data: &msgs::TxComplete) -> StateTransitionResult<NegotiationComplete> {
+				let context = self.into_negotiation_context();
+				let tx = context.build_transaction()?;
+				Ok(NegotiationComplete(tx))
+			}
+		}
+
+		impl StateTransition<$tx_complete_state, &msgs::TxComplete> for $from_state {
+			fn transition(self, _data: &msgs::TxComplete) -> StateTransitionResult<$tx_complete_state> {
+				Ok($tx_complete_state(self.into_negotiation_context()))
+			}
+		}
+	};
+}
+
+// State transitions when we have sent our counterparty some messages and are waiting for them
+// to respond. Each DATA/TRANSITION pair maps a wire message type to the validating
+// `NegotiationContext` method invoked on receipt.
+define_state_transitions!(SENT_MSG_STATE, [
+	DATA &msgs::TxAddInput, TRANSITION received_tx_add_input,
+	DATA &msgs::TxRemoveInput, TRANSITION received_tx_remove_input,
+	DATA &msgs::TxAddOutput, TRANSITION received_tx_add_output,
+	DATA &msgs::TxRemoveOutput, TRANSITION received_tx_remove_output
+]);
+// State transitions when we have received some messages from our counterparty and we should
+// respond.
+define_state_transitions!(RECEIVED_MSG_STATE, [
+	DATA &msgs::TxAddInput, TRANSITION sent_tx_add_input,
+	DATA &msgs::TxRemoveInput, TRANSITION sent_tx_remove_input,
+	DATA &msgs::TxAddOutput, TRANSITION sent_tx_add_output,
+	DATA &msgs::TxRemoveOutput, TRANSITION sent_tx_remove_output
+]);
+// The `tx_complete` handshake in each direction (we complete first, or they do).
+define_state_transitions!(TX_COMPLETE, SentChangeMsg, ReceivedTxComplete);
+define_state_transitions!(TX_COMPLETE, ReceivedChangeMsg, SentTxComplete);
+
+/// Type-erased wrapper over the current negotiation state, so the state machine can be
+/// stored in a single field and advanced in place.
+#[derive(Debug)]
+enum StateMachine {
+	// Placeholder left behind by `core::mem::take` while a transition is executed; never
+	// observed outside `do_state_transition!`.
+	Indeterminate,
+	SentChangeMsg(SentChangeMsg),
+	ReceivedChangeMsg(ReceivedChangeMsg),
+	SentTxComplete(SentTxComplete),
+	ReceivedTxComplete(ReceivedTxComplete),
+	NegotiationComplete(NegotiationComplete),
+	NegotiationAborted(NegotiationAborted),
+}
+
+impl Default for StateMachine {
+	fn default() -> Self {
+		Self::Indeterminate
+	}
+}
+
+// The `StateMachine` internally executes the actual transition between two states and keeps
+// track of the current state. This macro defines _how_ those state transitions happen to
+// update the internal state.
+//
+// Any message arriving in a state with no listed FROM arm — including `Indeterminate` — is a
+// protocol violation and moves the machine to `NegotiationAborted`.
+macro_rules! define_state_machine_transitions {
+	($transition: ident, $msg: ty, [$(FROM $from_state: ident, TO $to_state: ident),+]) => {
+		fn $transition(self, msg: $msg) -> StateMachine {
+			match self {
+				$(
+					Self::$from_state(s) => match s.transition(msg) {
+						Ok(new_state) => StateMachine::$to_state(new_state),
+						Err(abort_reason) => StateMachine::NegotiationAborted(NegotiationAborted(abort_reason)),
+					}
+				)*
+				_ => StateMachine::NegotiationAborted(NegotiationAborted(AbortReason::UnexpectedCounterpartyMessage)),
+			}
+		}
+	};
+}
+
+impl StateMachine {
+	// Creates a fresh state machine. The initiator starts as if it had just "received" a
+	// change message (it speaks first); the non-initiator starts as if it had just "sent"
+	// one (it waits for the initiator's first message).
+	fn new(
+		feerate_sat_per_kw: u32, is_initiator: bool, tx_locktime: AbsoluteLockTime,
+		to_remote_value_satoshis: u64,
+	) -> Self {
+		let context = NegotiationContext {
+			tx_locktime,
+			holder_is_initiator: is_initiator,
+			received_tx_add_input_count: 0,
+			received_tx_add_output_count: 0,
+			inputs: new_hash_map(),
+			prevtx_outpoints: new_hash_set(),
+			outputs: new_hash_map(),
+			feerate_sat_per_kw,
+			to_remote_value_satoshis,
+		};
+		if is_initiator {
+			Self::ReceivedChangeMsg(ReceivedChangeMsg(context))
+		} else {
+			Self::SentChangeMsg(SentChangeMsg(context))
+		}
+	}
+
+	// TxAddInput
+	define_state_machine_transitions!(sent_tx_add_input, &msgs::TxAddInput, [
+		FROM ReceivedChangeMsg, TO SentChangeMsg,
+		FROM ReceivedTxComplete, TO SentChangeMsg
+	]);
+	define_state_machine_transitions!(received_tx_add_input, &msgs::TxAddInput, [
+		FROM SentChangeMsg, TO ReceivedChangeMsg,
+		FROM SentTxComplete, TO ReceivedChangeMsg
+	]);
+
+	// TxAddOutput
+	define_state_machine_transitions!(sent_tx_add_output, &msgs::TxAddOutput, [
+		FROM ReceivedChangeMsg, TO SentChangeMsg,
+		FROM ReceivedTxComplete, TO SentChangeMsg
+	]);
+	define_state_machine_transitions!(received_tx_add_output, &msgs::TxAddOutput, [
+		FROM SentChangeMsg, TO ReceivedChangeMsg,
+		FROM SentTxComplete, TO ReceivedChangeMsg
+	]);
+
+	// TxRemoveInput
+	define_state_machine_transitions!(sent_tx_remove_input, &msgs::TxRemoveInput, [
+		FROM ReceivedChangeMsg, TO SentChangeMsg,
+		FROM ReceivedTxComplete, TO SentChangeMsg
+	]);
+	define_state_machine_transitions!(received_tx_remove_input, &msgs::TxRemoveInput, [
+		FROM SentChangeMsg, TO ReceivedChangeMsg,
+		FROM SentTxComplete, TO ReceivedChangeMsg
+	]);
+
+	// TxRemoveOutput
+	define_state_machine_transitions!(sent_tx_remove_output, &msgs::TxRemoveOutput, [
+		FROM ReceivedChangeMsg, TO SentChangeMsg,
+		FROM ReceivedTxComplete, TO SentChangeMsg
+	]);
+	define_state_machine_transitions!(received_tx_remove_output, &msgs::TxRemoveOutput, [
+		FROM SentChangeMsg, TO ReceivedChangeMsg,
+		FROM SentTxComplete, TO ReceivedChangeMsg
+	]);
+
+	// TxComplete — a `tx_complete` following the counterparty's `tx_complete` (in either
+	// direction) finishes the negotiation.
+	define_state_machine_transitions!(sent_tx_complete, &msgs::TxComplete, [
+		FROM ReceivedChangeMsg, TO SentTxComplete,
+		FROM ReceivedTxComplete, TO NegotiationComplete
+	]);
+	define_state_machine_transitions!(received_tx_complete, &msgs::TxComplete, [
+		FROM SentChangeMsg, TO ReceivedTxComplete,
+		FROM SentTxComplete, TO NegotiationComplete
+	]);
+}
+
+/// Drives one side of an interactive transaction negotiation: holds the state machine plus
+/// the queues of serial-id-tagged inputs and outputs we still intend to contribute.
+pub struct InteractiveTxConstructor {
+	state_machine: StateMachine,
+	channel_id: ChannelId,
+	inputs_to_contribute: Vec<(SerialId, TxIn, TransactionU16LenLimited)>,
+	outputs_to_contribute: Vec<(SerialId, TxOut)>,
+}
+
+/// A wire message the caller must send to the counterparty to continue the negotiation.
+pub enum InteractiveTxMessageSend {
+	TxAddInput(msgs::TxAddInput),
+	TxAddOutput(msgs::TxAddOutput),
+	TxComplete(msgs::TxComplete),
+}
+
+// This macro executes a state machine transition based on a provided action.
+//
+// `mem::take` temporarily leaves `Indeterminate` in place so the owned state can be consumed
+// by the transition; the result (possibly `NegotiationAborted`) is stored back and surfaced
+// to the caller as a `Result`.
+macro_rules! do_state_transition {
+	($self: ident, $transition: ident, $msg: expr) => {{
+		let state_machine = core::mem::take(&mut $self.state_machine);
+		$self.state_machine = state_machine.$transition($msg);
+		match &$self.state_machine {
+			StateMachine::NegotiationAborted(state) => Err(state.0.clone()),
+			_ => Ok(()),
+		}
+	}};
+}
+
+/// Generates a random `SerialId` for one of our contributions, forcing the low bit to the
+/// parity required by our role (even for the initiator, odd otherwise — see `SerialIdExt`).
+fn generate_holder_serial_id<ES: Deref>(entropy_source: &ES, is_initiator: bool) -> SerialId
+where
+	ES::Target: EntropySource,
+{
+	let rand_bytes = entropy_source.get_secure_random_bytes();
+	let mut serial_id_bytes = [0u8; 8];
+	serial_id_bytes.copy_from_slice(&rand_bytes[..8]);
+	let mut serial_id = u64::from_be_bytes(serial_id_bytes);
+	if serial_id.is_for_initiator() != is_initiator {
+		// Flip the low bit to correct the parity without losing randomness elsewhere.
+		serial_id ^= 1;
+	}
+	serial_id
+}
+
+/// Outcome of handling a counterparty `tx_complete`: either we still have a message to send,
+/// we respond with our own `tx_complete` alongside the finished transaction, or the
+/// negotiation is already complete.
+pub enum HandleTxCompleteValue {
+	SendTxMessage(InteractiveTxMessageSend),
+	SendTxComplete(InteractiveTxMessageSend, Transaction),
+	NegotiationComplete(Transaction),
+}
+
+impl InteractiveTxConstructor {
+ /// Instantiates a new `InteractiveTxConstructor`.
+ ///
+ /// If this is for a dual_funded channel then the `to_remote_value_satoshis` parameter should be set
+ /// to zero.
+ ///
+    /// A tuple is returned containing the newly instantiated `InteractiveTxConstructor` and optionally
+ /// an initial wrapped `Tx_` message which the holder needs to send to the counterparty.
+ pub fn new<ES: Deref>(
+ entropy_source: &ES, channel_id: ChannelId, feerate_sat_per_kw: u32, is_initiator: bool,
+ funding_tx_locktime: AbsoluteLockTime,
+ inputs_to_contribute: Vec<(TxIn, TransactionU16LenLimited)>,
+ outputs_to_contribute: Vec<TxOut>, to_remote_value_satoshis: u64,
+ ) -> (Self, Option<InteractiveTxMessageSend>)
+ where
+ ES::Target: EntropySource,
+ {
+ let state_machine = StateMachine::new(
+ feerate_sat_per_kw,
+ is_initiator,
+ funding_tx_locktime,
+ to_remote_value_satoshis,
+ );
+ let mut inputs_to_contribute: Vec<(SerialId, TxIn, TransactionU16LenLimited)> =
+ inputs_to_contribute
+ .into_iter()
+ .map(|(input, tx)| {
+ let serial_id = generate_holder_serial_id(entropy_source, is_initiator);
+ (serial_id, input, tx)
+ })
+ .collect();
+ // We'll sort by the randomly generated serial IDs, effectively shuffling the order of the inputs
+ // as the user passed them to us to avoid leaking any potential categorization of transactions
+ // before we pass any of the inputs to the counterparty.
+ inputs_to_contribute.sort_unstable_by_key(|(serial_id, _, _)| *serial_id);
+ let mut outputs_to_contribute: Vec<(SerialId, TxOut)> = outputs_to_contribute
+ .into_iter()
+ .map(|output| {
+ let serial_id = generate_holder_serial_id(entropy_source, is_initiator);
+ (serial_id, output)
+ })
+ .collect();
+ // In the same manner and for the same rationale as the inputs above, we'll shuffle the outputs.
+ outputs_to_contribute.sort_unstable_by_key(|(serial_id, _)| *serial_id);
+ let mut constructor =
+ Self { state_machine, channel_id, inputs_to_contribute, outputs_to_contribute };
+ let message_send = if is_initiator {
+ match constructor.maybe_send_message() {
+ Ok(msg_send) => Some(msg_send),
+ Err(_) => {
+ debug_assert!(
+ false,
+ "We should always be able to start our state machine successfully"
+ );
+ None
+ },
+ }
+ } else {
+ None
+ };
+ (constructor, message_send)
+ }
+
+ fn maybe_send_message(&mut self) -> Result<InteractiveTxMessageSend, AbortReason> {
+ // We first attempt to send inputs we want to add, then outputs. Once we are done sending
+ // them both, then we always send tx_complete.
+ if let Some((serial_id, input, prevtx)) = self.inputs_to_contribute.pop() {
+ let msg = msgs::TxAddInput {
+ channel_id: self.channel_id,
+ serial_id,
+ prevtx,
+ prevtx_out: input.previous_output.vout,
+ sequence: input.sequence.to_consensus_u32(),
+ };
+ do_state_transition!(self, sent_tx_add_input, &msg)?;
+ Ok(InteractiveTxMessageSend::TxAddInput(msg))
+ } else if let Some((serial_id, output)) = self.outputs_to_contribute.pop() {
+ let msg = msgs::TxAddOutput {
+ channel_id: self.channel_id,
+ serial_id,
+ sats: output.value,
+ script: output.script_pubkey,
+ };
+ do_state_transition!(self, sent_tx_add_output, &msg)?;
+ Ok(InteractiveTxMessageSend::TxAddOutput(msg))
+ } else {
+ let msg = msgs::TxComplete { channel_id: self.channel_id };
+ do_state_transition!(self, sent_tx_complete, &msg)?;
+ Ok(InteractiveTxMessageSend::TxComplete(msg))
+ }
+ }
+
+ pub fn handle_tx_add_input(
+ &mut self, msg: &msgs::TxAddInput,
+ ) -> Result<InteractiveTxMessageSend, AbortReason> {
+ do_state_transition!(self, received_tx_add_input, msg)?;
+ self.maybe_send_message()
+ }
+
+ pub fn handle_tx_remove_input(
+ &mut self, msg: &msgs::TxRemoveInput,
+ ) -> Result<InteractiveTxMessageSend, AbortReason> {
+ do_state_transition!(self, received_tx_remove_input, msg)?;
+ self.maybe_send_message()
+ }
+
+ pub fn handle_tx_add_output(
+ &mut self, msg: &msgs::TxAddOutput,
+ ) -> Result<InteractiveTxMessageSend, AbortReason> {
+ do_state_transition!(self, received_tx_add_output, msg)?;
+ self.maybe_send_message()
+ }
+
+ pub fn handle_tx_remove_output(
+ &mut self, msg: &msgs::TxRemoveOutput,
+ ) -> Result<InteractiveTxMessageSend, AbortReason> {
+ do_state_transition!(self, received_tx_remove_output, msg)?;
+ self.maybe_send_message()
+ }
+
+ pub fn handle_tx_complete(
+ &mut self, msg: &msgs::TxComplete,
+ ) -> Result<HandleTxCompleteValue, AbortReason> {
+ do_state_transition!(self, received_tx_complete, msg)?;
+ match &self.state_machine {
+ StateMachine::ReceivedTxComplete(_) => {
+ let msg_send = self.maybe_send_message()?;
+ return match &self.state_machine {
+ StateMachine::NegotiationComplete(s) => {
+ Ok(HandleTxCompleteValue::SendTxComplete(msg_send, s.0.clone()))
+ },
+ StateMachine::SentChangeMsg(_) => {
+ Ok(HandleTxCompleteValue::SendTxMessage(msg_send))
+ }, // We either had an input or output to contribute.
+ _ => {
+ debug_assert!(false, "We cannot transition to any other states after receiving `tx_complete` and responding");
+ return Err(AbortReason::InvalidStateTransition);
+ },
+ };
+ },
+ StateMachine::NegotiationComplete(s) => {
+ Ok(HandleTxCompleteValue::NegotiationComplete(s.0.clone()))
+ },
+ _ => {
+ debug_assert!(
+ false,
+ "We cannot transition to any other states after receiving `tx_complete`"
+ );
+ Err(AbortReason::InvalidStateTransition)
+ },
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+	use crate::chain::chaininterface::FEERATE_FLOOR_SATS_PER_KW;
+	use crate::ln::channel::TOTAL_BITCOIN_SUPPLY_SATOSHIS;
+	use crate::ln::interactivetxs::{
+		generate_holder_serial_id, AbortReason, HandleTxCompleteValue, InteractiveTxConstructor,
+		InteractiveTxMessageSend, MAX_INPUTS_OUTPUTS_COUNT, MAX_RECEIVED_TX_ADD_INPUT_COUNT,
+		MAX_RECEIVED_TX_ADD_OUTPUT_COUNT,
+	};
+	use crate::ln::ChannelId;
+	use crate::sign::EntropySource;
+	use crate::util::atomic_counter::AtomicCounter;
+	use crate::util::ser::TransactionU16LenLimited;
+	use bitcoin::blockdata::opcodes;
+	use bitcoin::blockdata::script::Builder;
+	use bitcoin::{
+		absolute::LockTime as AbsoluteLockTime, OutPoint, Sequence, Transaction, TxIn, TxOut,
+	};
+	use core::ops::Deref;
+
+	// A simple entropy source that works based on an atomic counter.
+	struct TestEntropySource(AtomicCounter);
+	impl EntropySource for TestEntropySource {
+		fn get_secure_random_bytes(&self) -> [u8; 32] {
+			let mut res = [0u8; 32];
+			let increment = self.0.get_increment();
+			for i in 0..32 {
+				// Rotate the increment value by 'i' bits to the right, to avoid clashes
+				// when `generate_local_serial_id` does a parity flip on consecutive calls for the
+				// same party.
+				let rotated_increment = increment.rotate_right(i as u32);
+				res[i] = (rotated_increment & 0xff) as u8;
+			}
+			res
+		}
+	}
+
+	// An entropy source that deliberately returns you the same seed every time. We use this
+	// to test if the constructor would catch inputs/outputs that are attempting to be added
+	// with duplicate serial ids.
+	struct DuplicateEntropySource;
+	impl EntropySource for DuplicateEntropySource {
+		fn get_secure_random_bytes(&self) -> [u8; 32] {
+			let mut res = [0u8; 32];
+			let count = 1u64;
+			res[0..8].copy_from_slice(&count.to_be_bytes());
+			res
+		}
+	}
+
+	// Identifies which peer a test session expects to have caused a negotiation failure.
+	#[derive(Debug, PartialEq, Eq)]
+	enum ErrorCulprit {
+		NodeA,
+		NodeB,
+		// Some error values are only checked at the end of the negotiation and are not easy to attribute
+		// to a particular party. Both parties would indicate an `AbortReason` in this case.
+		// e.g. Exceeded max inputs and outputs after negotiation.
+		Indeterminate,
+	}
+
+	// The inputs/outputs each side will attempt to contribute, plus the error (if any) the
+	// negotiation is expected to end with.
+	struct TestSession {
+		inputs_a: Vec<(TxIn, TransactionU16LenLimited)>,
+		outputs_a: Vec<TxOut>,
+		inputs_b: Vec<(TxIn, TransactionU16LenLimited)>,
+		outputs_b: Vec<TxOut>,
+		expect_error: Option<(AbortReason, ErrorCulprit)>,
+	}
+
+	fn do_test_interactive_tx_constructor(session: TestSession) {
+		let entropy_source = TestEntropySource(AtomicCounter::new());
+		do_test_interactive_tx_constructor_internal(session, &&entropy_source);
+	}
+
+	fn do_test_interactive_tx_constructor_with_entropy_source<ES: Deref>(
+		session: TestSession, entropy_source: ES,
+	) where
+		ES::Target: EntropySource,
+	{
+		do_test_interactive_tx_constructor_internal(session, &entropy_source);
+	}
+
+	// Drives a full negotiation between node A (the initiator) and node B, asserting that either
+	// both sides arrive at the same final transaction or the expected error is produced.
+	fn do_test_interactive_tx_constructor_internal<ES: Deref>(
+		session: TestSession, entropy_source: &ES,
+	) where
+		ES::Target: EntropySource,
+	{
+		let channel_id = ChannelId(entropy_source.get_secure_random_bytes());
+		let tx_locktime = AbsoluteLockTime::from_height(1337).unwrap();
+
+		let (mut constructor_a, first_message_a) = InteractiveTxConstructor::new(
+			entropy_source,
+			channel_id,
+			FEERATE_FLOOR_SATS_PER_KW * 10,
+			true,
+			tx_locktime,
+			session.inputs_a,
+			session.outputs_a,
+			0,
+		);
+		let (mut constructor_b, first_message_b) = InteractiveTxConstructor::new(
+			entropy_source,
+			channel_id,
+			FEERATE_FLOOR_SATS_PER_KW * 10,
+			false,
+			tx_locktime,
+			session.inputs_b,
+			session.outputs_b,
+			0,
+		);
+
+		// Delivers `msg` to `for_constructor`, returning the response message (if any) and the
+		// final negotiated transaction (if negotiation completed on that side).
+		let handle_message_send =
+			|msg: InteractiveTxMessageSend, for_constructor: &mut InteractiveTxConstructor| {
+				match msg {
+					InteractiveTxMessageSend::TxAddInput(msg) => for_constructor
+						.handle_tx_add_input(&msg)
+						.map(|msg_send| (Some(msg_send), None)),
+					InteractiveTxMessageSend::TxAddOutput(msg) => for_constructor
+						.handle_tx_add_output(&msg)
+						.map(|msg_send| (Some(msg_send), None)),
+					InteractiveTxMessageSend::TxComplete(msg) => {
+						for_constructor.handle_tx_complete(&msg).map(|value| match value {
+							HandleTxCompleteValue::SendTxMessage(msg_send) => {
+								(Some(msg_send), None)
+							},
+							HandleTxCompleteValue::SendTxComplete(msg_send, tx) => {
+								(Some(msg_send), Some(tx))
+							},
+							HandleTxCompleteValue::NegotiationComplete(tx) => (None, Some(tx)),
+						})
+					},
+				}
+			};
+
+		assert!(first_message_b.is_none());
+		let mut message_send_a = first_message_a;
+		let mut message_send_b = None;
+		let mut final_tx_a = None;
+		let mut final_tx_b = None;
+		// Alternate message delivery between the two peers until both sides hold a final
+		// transaction or one of them aborts with the expected error.
+		while final_tx_a.is_none() || final_tx_b.is_none() {
+			if let Some(message_send_a) = message_send_a.take() {
+				match handle_message_send(message_send_a, &mut constructor_b) {
+					Ok((msg_send, final_tx)) => {
+						message_send_b = msg_send;
+						final_tx_b = final_tx;
+					},
+					Err(abort_reason) => {
+						let error_culprit = match abort_reason {
+							AbortReason::ExceededNumberOfInputsOrOutputs => {
+								ErrorCulprit::Indeterminate
+							},
+							_ => ErrorCulprit::NodeA,
+						};
+						assert_eq!(Some((abort_reason, error_culprit)), session.expect_error);
+						assert!(message_send_b.is_none());
+						return;
+					},
+				}
+			}
+			if let Some(message_send_b) = message_send_b.take() {
+				match handle_message_send(message_send_b, &mut constructor_a) {
+					Ok((msg_send, final_tx)) => {
+						message_send_a = msg_send;
+						final_tx_a = final_tx;
+					},
+					Err(abort_reason) => {
+						let error_culprit = match abort_reason {
+							AbortReason::ExceededNumberOfInputsOrOutputs => {
+								ErrorCulprit::Indeterminate
+							},
+							_ => ErrorCulprit::NodeB,
+						};
+						assert_eq!(Some((abort_reason, error_culprit)), session.expect_error);
+						assert!(message_send_a.is_none());
+						return;
+					},
+				}
+			}
+		}
+		assert!(message_send_a.is_none());
+		assert!(message_send_b.is_none());
+		assert_eq!(final_tx_a, final_tx_b);
+		assert!(session.expect_error.is_none());
+	}
+
+	// Generates a transaction paying `values` to anyone-can-spend v0 P2WSH outputs.
+	fn generate_tx(values: &[u64]) -> Transaction {
+		generate_tx_with_locktime(values, 1337)
+	}
+
+	fn generate_tx_with_locktime(values: &[u64], locktime: u32) -> Transaction {
+		Transaction {
+			version: 2,
+			lock_time: AbsoluteLockTime::from_height(locktime).unwrap(),
+			input: vec![TxIn { ..Default::default() }],
+			output: values
+				.iter()
+				.map(|value| TxOut {
+					value: *value,
+					script_pubkey: Builder::new()
+						.push_opcode(opcodes::OP_TRUE)
+						.into_script()
+						.to_v0_p2wsh(),
+				})
+				.collect(),
+		}
+	}
+
+	// Generates one spendable input per value, all referencing outputs of a single prev tx.
+	fn generate_inputs(values: &[u64]) -> Vec<(TxIn, TransactionU16LenLimited)> {
+		let tx = generate_tx(values);
+		let txid = tx.txid();
+		tx.output
+			.iter()
+			.enumerate()
+			.map(|(idx, _)| {
+				let input = TxIn {
+					previous_output: OutPoint { txid, vout: idx as u32 },
+					script_sig: Default::default(),
+					sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
+					witness: Default::default(),
+				};
+				(input, TransactionU16LenLimited::new(tx.clone()).unwrap())
+			})
+			.collect()
+	}
+
+	fn generate_outputs(values: &[u64]) -> Vec<TxOut> {
+		values
+			.iter()
+			.map(|value| TxOut {
+				value: *value,
+				script_pubkey: Builder::new()
+					.push_opcode(opcodes::OP_TRUE)
+					.into_script()
+					.to_v0_p2wsh(),
+			})
+			.collect()
+	}
+
+	fn generate_fixed_number_of_inputs(count: u16) -> Vec<(TxIn, TransactionU16LenLimited)> {
+		// Generate transactions with a total `count` number of outputs such that no transaction has a
+		// serialized length greater than u16::MAX.
+		let max_outputs_per_prevtx = 1_500;
+		let mut remaining = count;
+		let mut inputs: Vec<(TxIn, TransactionU16LenLimited)> = Vec::with_capacity(count as usize);
+
+		while remaining > 0 {
+			let tx_output_count = remaining.min(max_outputs_per_prevtx);
+			remaining -= tx_output_count;
+
+			// Use unique locktime for each tx so outpoints are different across transactions
+			let tx = generate_tx_with_locktime(
+				&vec![1_000_000; tx_output_count as usize],
+				(1337 + remaining).into(),
+			);
+			let txid = tx.txid();
+
+			let mut temp: Vec<(TxIn, TransactionU16LenLimited)> = tx
+				.output
+				.iter()
+				.enumerate()
+				.map(|(idx, _)| {
+					let input = TxIn {
+						previous_output: OutPoint { txid, vout: idx as u32 },
+						script_sig: Default::default(),
+						sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
+						witness: Default::default(),
+					};
+					(input, TransactionU16LenLimited::new(tx.clone()).unwrap())
+				})
+				.collect();
+
+			inputs.append(&mut temp);
+		}
+
+		inputs
+	}
+
+	fn generate_fixed_number_of_outputs(count: u16) -> Vec<TxOut> {
+		// Set a constant value for each TxOut
+		generate_outputs(&vec![1_000_000; count as usize])
+	}
+
+	// A P2SH output, i.e. an output without a witness program.
+	fn generate_non_witness_output(value: u64) -> TxOut {
+		TxOut {
+			value,
+			script_pubkey: Builder::new().push_opcode(opcodes::OP_TRUE).into_script().to_p2sh(),
+		}
+	}
+
+	#[test]
+	fn test_interactive_tx_constructor() {
+		// No contributions.
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: vec![],
+			outputs_a: vec![],
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)),
+		});
+		// Single contribution, no initiator inputs.
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: vec![],
+			outputs_a: generate_outputs(&[1_000_000]),
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((AbortReason::OutputsValueExceedsInputsValue, ErrorCulprit::NodeA)),
+		});
+		// Single contribution, no initiator outputs.
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: generate_inputs(&[1_000_000]),
+			outputs_a: vec![],
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: None,
+		});
+		// Single contribution, insufficient fees.
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: generate_inputs(&[1_000_000]),
+			outputs_a: generate_outputs(&[1_000_000]),
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeA)),
+		});
+		// Initiator contributes sufficient fees, but non-initiator does not.
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: generate_inputs(&[1_000_000]),
+			outputs_a: vec![],
+			inputs_b: generate_inputs(&[100_000]),
+			outputs_b: generate_outputs(&[100_000]),
+			expect_error: Some((AbortReason::InsufficientFees, ErrorCulprit::NodeB)),
+		});
+		// Multi-input-output contributions from both sides.
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: generate_inputs(&[1_000_000, 1_000_000]),
+			outputs_a: generate_outputs(&[1_000_000, 200_000]),
+			inputs_b: generate_inputs(&[1_000_000, 500_000]),
+			outputs_b: generate_outputs(&[1_000_000, 400_000]),
+			expect_error: None,
+		});
+
+		// Prevout from initiator is not a witness program
+		let non_segwit_output_tx = {
+			let mut tx = generate_tx(&[1_000_000]);
+			tx.output.push(TxOut {
+				script_pubkey: Builder::new()
+					.push_opcode(opcodes::all::OP_RETURN)
+					.into_script()
+					.to_p2sh(),
+				..Default::default()
+			});
+
+			TransactionU16LenLimited::new(tx).unwrap()
+		};
+		let non_segwit_input = TxIn {
+			previous_output: OutPoint {
+				txid: non_segwit_output_tx.as_transaction().txid(),
+				vout: 1,
+			},
+			sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
+			..Default::default()
+		};
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: vec![(non_segwit_input, non_segwit_output_tx)],
+			outputs_a: vec![],
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeA)),
+		});
+
+		// Invalid input sequence from initiator.
+		let tx = TransactionU16LenLimited::new(generate_tx(&[1_000_000])).unwrap();
+		let invalid_sequence_input = TxIn {
+			previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 },
+			..Default::default()
+		};
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: vec![(invalid_sequence_input, tx.clone())],
+			outputs_a: generate_outputs(&[1_000_000]),
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((AbortReason::IncorrectInputSequenceValue, ErrorCulprit::NodeA)),
+		});
+		// Duplicate prevout from initiator.
+		let duplicate_input = TxIn {
+			previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 },
+			sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
+			..Default::default()
+		};
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: vec![(duplicate_input.clone(), tx.clone()), (duplicate_input, tx.clone())],
+			outputs_a: generate_outputs(&[1_000_000]),
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeA)),
+		});
+		// Non-initiator uses same prevout as initiator.
+		let duplicate_input = TxIn {
+			previous_output: OutPoint { txid: tx.as_transaction().txid(), vout: 0 },
+			sequence: Sequence::ENABLE_RBF_NO_LOCKTIME,
+			..Default::default()
+		};
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: vec![(duplicate_input.clone(), tx.clone())],
+			outputs_a: generate_outputs(&[1_000_000]),
+			inputs_b: vec![(duplicate_input.clone(), tx.clone())],
+			outputs_b: vec![],
+			expect_error: Some((AbortReason::PrevTxOutInvalid, ErrorCulprit::NodeB)),
+		});
+		// Initiator sends too many TxAddInputs
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: generate_fixed_number_of_inputs(MAX_RECEIVED_TX_ADD_INPUT_COUNT + 1),
+			outputs_a: vec![],
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((AbortReason::ReceivedTooManyTxAddInputs, ErrorCulprit::NodeA)),
+		});
+		// Attempt to queue up two inputs with duplicate serial ids. We use a deliberately bad
+		// entropy source, `DuplicateEntropySource` to simulate this.
+		do_test_interactive_tx_constructor_with_entropy_source(
+			TestSession {
+				inputs_a: generate_fixed_number_of_inputs(2),
+				outputs_a: vec![],
+				inputs_b: vec![],
+				outputs_b: vec![],
+				expect_error: Some((AbortReason::DuplicateSerialId, ErrorCulprit::NodeA)),
+			},
+			&DuplicateEntropySource,
+		);
+		// Initiator sends too many TxAddOutputs.
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: vec![],
+			outputs_a: generate_fixed_number_of_outputs(MAX_RECEIVED_TX_ADD_OUTPUT_COUNT + 1),
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((AbortReason::ReceivedTooManyTxAddOutputs, ErrorCulprit::NodeA)),
+		});
+		// Initiator sends an output below dust value.
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: vec![],
+			outputs_a: generate_outputs(&[1]),
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((AbortReason::BelowDustLimit, ErrorCulprit::NodeA)),
+		});
+		// Initiator sends an output above maximum sats allowed.
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: vec![],
+			outputs_a: generate_outputs(&[TOTAL_BITCOIN_SUPPLY_SATOSHIS + 1]),
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((AbortReason::ExceededMaximumSatsAllowed, ErrorCulprit::NodeA)),
+		});
+		// Initiator sends an output without a witness program.
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: vec![],
+			outputs_a: vec![generate_non_witness_output(1_000_000)],
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((AbortReason::InvalidOutputScript, ErrorCulprit::NodeA)),
+		});
+		// Attempt to queue up two outputs with duplicate serial ids. We use a deliberately bad
+		// entropy source, `DuplicateEntropySource` to simulate this.
+		do_test_interactive_tx_constructor_with_entropy_source(
+			TestSession {
+				inputs_a: vec![],
+				outputs_a: generate_fixed_number_of_outputs(2),
+				inputs_b: vec![],
+				outputs_b: vec![],
+				expect_error: Some((AbortReason::DuplicateSerialId, ErrorCulprit::NodeA)),
+			},
+			&DuplicateEntropySource,
+		);
+
+		// Peer contributed more output value than inputs
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: generate_inputs(&[100_000]),
+			outputs_a: generate_outputs(&[1_000_000]),
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((AbortReason::OutputsValueExceedsInputsValue, ErrorCulprit::NodeA)),
+		});
+
+		// Peer contributed more than allowed number of inputs.
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: generate_fixed_number_of_inputs(MAX_INPUTS_OUTPUTS_COUNT as u16 + 1),
+			outputs_a: vec![],
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((
+				AbortReason::ExceededNumberOfInputsOrOutputs,
+				ErrorCulprit::Indeterminate,
+			)),
+		});
+		// Peer contributed more than allowed number of outputs.
+		do_test_interactive_tx_constructor(TestSession {
+			inputs_a: generate_inputs(&[TOTAL_BITCOIN_SUPPLY_SATOSHIS]),
+			outputs_a: generate_fixed_number_of_outputs(MAX_INPUTS_OUTPUTS_COUNT as u16 + 1),
+			inputs_b: vec![],
+			outputs_b: vec![],
+			expect_error: Some((
+				AbortReason::ExceededNumberOfInputsOrOutputs,
+				ErrorCulprit::Indeterminate,
+			)),
+		});
+	}
+
+	#[test]
+	fn test_generate_local_serial_id() {
+		let entropy_source = TestEntropySource(AtomicCounter::new());
+
+		// Initiators should have even serial id, non-initiators should have odd serial id.
+		assert_eq!(generate_holder_serial_id(&&entropy_source, true) % 2, 0);
+		assert_eq!(generate_holder_serial_id(&&entropy_source, false) % 2, 1)
+	}
+}
#[cfg(test)]
#[allow(unused_mut)]
mod offers_tests;
+#[allow(dead_code)] // TODO(dual_funding): Exchange for dual_funding cfg
+pub(crate) mod interactivetxs;
pub use self::peer_channel_encryptor::LN_MAX_MSG_LEN;
assert!(failed_payments.is_empty());
if let Event::PendingHTLCsForwardable { .. } = events[0] {} else { panic!(); }
match &events[1] {
- Event::ChannelClosed { reason: ClosureReason::HolderForceClosed, .. } => {},
+ Event::ChannelClosed { reason: ClosureReason::HTLCsTimedOut, .. } => {},
_ => panic!(),
}
connect_blocks(&nodes[1], htlc_cltv_timeout + 1 - 10);
check_closed_broadcast!(nodes[1], true);
check_added_monitors!(nodes[1], 1);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
// Prior to channel closure, B considers the preimage HTLC as its own, and otherwise only
// lists the two on-chain timeout-able HTLCs as claimable balances.
pub short_channel_id_alias: Option<u64>,
}
+/// A randomly chosen number that is used to identify inputs and outputs within an interactive
+/// transaction construction (even for those added by the initiator, odd otherwise).
+pub type SerialId = u64;
+
/// An stfu (quiescence) message to be sent by or received from the stfu initiator.
// TODO(splicing): Add spec link for `stfu`; still in draft, using from https://github.com/lightning/bolts/pull/863
#[derive(Clone, Debug, PartialEq, Eq)]
pub channel_id: ChannelId,
/// A randomly chosen unique identifier for this input, which is even for initiators and odd for
/// non-initiators.
- pub serial_id: u64,
+ pub serial_id: SerialId,
/// Serialized transaction that contains the output this input spends to verify that it is non
/// malleable.
pub prevtx: TransactionU16LenLimited,
pub channel_id: ChannelId,
/// A randomly chosen unique identifier for this output, which is even for initiators and odd for
/// non-initiators.
- pub serial_id: u64,
+ pub serial_id: SerialId,
/// The satoshi value of the output
pub sats: u64,
/// The scriptPubKey for the output
/// The channel ID
pub channel_id: ChannelId,
/// The serial ID of the input to be removed
- pub serial_id: u64,
+ pub serial_id: SerialId,
}
/// A tx_remove_output message for removing an output during interactive transaction construction.
/// The channel ID
pub channel_id: ChannelId,
/// The serial ID of the output to be removed
- pub serial_id: u64,
+ pub serial_id: SerialId,
}
/// A tx_complete message signalling the conclusion of a peer's transaction contributions during
pub alias: NodeAlias,
/// List of addresses on which this node is reachable
pub addresses: Vec<SocketAddress>,
- pub(crate) excess_address_data: Vec<u8>,
- pub(crate) excess_data: Vec<u8>,
+ /// Excess address data which was signed as a part of the message which we do not (yet) understand how
+ /// to decode.
+ ///
+ /// This is stored to ensure forward-compatibility as new address types are added to the lightning gossip protocol.
+ pub excess_address_data: Vec<u8>,
+ /// Excess data which was signed as a part of the message which we do not (yet) understand how
+ /// to decode.
+ ///
+ /// This is stored to ensure forward-compatibility as new fields are added to the lightning gossip protocol.
+ pub excess_data: Vec<u8>,
}
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
/// A [`node_announcement`] message to be sent to or received from a peer.
use crate::prelude::*;
use crate::ln::{PaymentPreimage, PaymentSecret};
use crate::ln::features::BlindedHopFeatures;
- use super::FinalOnionHopData;
+ use super::{FinalOnionHopData, TrampolineOnionPacket};
// These types aren't intended to be pub, but are exposed for direct fuzzing (as we deserialize
// them from untrusted input):
payment_secret: PaymentSecret,
payment_constraints: PaymentConstraints,
intro_node_blinding_point: Option<PublicKey>,
+ keysend_preimage: Option<PaymentPreimage>,
}
}
amt_to_forward: u64,
outgoing_cltv_value: u32,
},
+ #[allow(unused)]
+ TrampolineEntrypoint {
+ amt_to_forward: u64,
+ outgoing_cltv_value: u32,
+ multipath_trampoline_data: Option<FinalOnionHopData>,
+ trampoline_packet: TrampolineOnionPacket,
+ },
Receive {
payment_data: Option<FinalOnionHopData>,
payment_metadata: Option<Vec<u8>>,
cltv_expiry_height: u32,
encrypted_tlvs: Vec<u8>,
intro_node_blinding_point: Option<PublicKey>, // Set if the introduction node of the blinded path is the final node
+ keysend_preimage: Option<PaymentPreimage>,
}
}
}
}
+/// BOLT 4 onion packet including hop data for the next peer.
+#[derive(Clone, Hash, PartialEq, Eq)]
+pub struct TrampolineOnionPacket {
+	/// BOLT 4 version number
+	pub version: u8,
+	/// A random secp256k1 point, used to build the ECDH shared secret to decrypt hop_data
+	pub public_key: PublicKey,
+	/// Encrypted payload for the next hop
+	//
+	// Unlike the onion packets used for payments, Trampoline onion packets have to be shorter than
+	// 1300 bytes. The expected default is 650 bytes.
+	// TODO: if 650 ends up being the most common size, optimize this to be:
+	// enum { ThirteenHundred([u8; 650]), VarLen(Vec<u8>) }
+	pub hop_data: Vec<u8>,
+	/// HMAC to verify the integrity of hop_data
+	pub hmac: [u8; 32],
+}
+
+impl onion_utils::Packet for TrampolineOnionPacket {
+	type Data = Vec<u8>;
+	// Constructs a Trampoline onion packet from its parts.
+	fn new(public_key: PublicKey, hop_data: Vec<u8>, hmac: [u8; 32]) -> Self {
+		Self {
+			// NOTE(review): version is pinned to 0 here — confirm against the Trampoline
+			// proposal if additional versions are ever defined.
+			version: 0,
+			public_key,
+			hop_data,
+			hmac,
+		}
+	}
+}
+
+impl Writeable for TrampolineOnionPacket {
+	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
+		self.version.write(w)?;
+		self.public_key.write(w)?;
+		// hop_data is written raw, with no length prefix; the packet's overall length is carried
+		// by the enclosing TLV record instead.
+		w.write_all(&self.hop_data)?;
+		self.hmac.write(w)?;
+		Ok(())
+	}
+}
+
+impl Debug for TrampolineOnionPacket {
+	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+		// The (potentially large) encrypted hop_data blob is not included in the debug output.
+		f.write_fmt(format_args!("TrampolineOnionPacket version {} with hmac {:?}", self.version, &self.hmac[..]))
+	}
+}
+
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct OnionErrorPacket {
// This really should be a constant size slice, but the spec lets these things be up to 128KB?
(6, short_channel_id, required)
});
},
+ Self::TrampolineEntrypoint {
+ amt_to_forward, outgoing_cltv_value, ref multipath_trampoline_data,
+ ref trampoline_packet
+ } => {
+ _encode_varint_length_prefixed_tlv!(w, {
+ (2, HighZeroBytesDroppedBigSize(*amt_to_forward), required),
+ (4, HighZeroBytesDroppedBigSize(*outgoing_cltv_value), required),
+ (8, multipath_trampoline_data, option),
+ (20, trampoline_packet, required)
+ });
+ },
Self::Receive {
ref payment_data, ref payment_metadata, ref keysend_preimage, sender_intended_htlc_amt_msat,
cltv_expiry_height, ref custom_tlvs,
},
Self::BlindedReceive {
sender_intended_htlc_amt_msat, total_msat, cltv_expiry_height, encrypted_tlvs,
- intro_node_blinding_point,
+ intro_node_blinding_point, keysend_preimage,
} => {
_encode_varint_length_prefixed_tlv!(w, {
(2, HighZeroBytesDroppedBigSize(*sender_intended_htlc_amt_msat), required),
(4, HighZeroBytesDroppedBigSize(*cltv_expiry_height), required),
(10, *encrypted_tlvs, required_vec),
(12, intro_node_blinding_point, option),
- (18, HighZeroBytesDroppedBigSize(*total_msat), required)
+ (18, HighZeroBytesDroppedBigSize(*total_msat), required),
+ (5482373484, keysend_preimage, option)
});
},
}
}
if let Some(blinding_point) = intro_node_blinding_point.or(update_add_blinding_point) {
- if short_id.is_some() || payment_data.is_some() || payment_metadata.is_some() ||
- keysend_preimage.is_some()
- {
+ if short_id.is_some() || payment_data.is_some() || payment_metadata.is_some() {
return Err(DecodeError::InvalidValue)
}
let enc_tlvs = encrypted_tlvs_opt.ok_or(DecodeError::InvalidValue)?.0;
ChaChaPolyReadAdapter { readable: BlindedPaymentTlvs::Forward(ForwardTlvs {
short_channel_id, payment_relay, payment_constraints, features
})} => {
- if amt.is_some() || cltv_value.is_some() || total_msat.is_some() {
+ if amt.is_some() || cltv_value.is_some() || total_msat.is_some() ||
+ keysend_preimage.is_some()
+ {
return Err(DecodeError::InvalidValue)
}
Ok(Self::BlindedForward {
payment_secret,
payment_constraints,
intro_node_blinding_point,
+ keysend_preimage,
})
},
}
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
use crate::ln::ChannelId;
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
- use crate::ln::msgs::{self, FinalOnionHopData, OnionErrorPacket, CommonOpenChannelFields, CommonAcceptChannelFields};
+ use crate::ln::msgs::{self, FinalOnionHopData, OnionErrorPacket, CommonOpenChannelFields, CommonAcceptChannelFields, TrampolineOnionPacket};
use crate::ln::msgs::SocketAddress;
use crate::routing::gossip::{NodeAlias, NodeId};
- use crate::util::ser::{Writeable, Readable, ReadableArgs, Hostname, TransactionU16LenLimited};
+ use crate::util::ser::{BigSize, Hostname, Readable, ReadableArgs, TransactionU16LenLimited, Writeable};
use crate::util::test_utils;
use bitcoin::hashes::hex::FromHex;
} else { panic!(); }
}
+	#[test]
+	fn encoding_final_onion_hop_data_with_trampoline_packet() {
+		let secp_ctx = Secp256k1::new();
+		let (_private_key, public_key) = get_keys_from!("0101010101010101010101010101010101010101010101010101010101010101", secp_ctx);
+
+		let compressed_public_key = public_key.serialize();
+		assert_eq!(compressed_public_key.len(), 33);
+
+		let trampoline_packet = TrampolineOnionPacket {
+			version: 0,
+			public_key,
+			hop_data: vec![1; 650], // this should be the standard encoded length
+			hmac: [2; 32],
+		};
+		let encoded_trampoline_packet = trampoline_packet.encode();
+		// version (1) + public key (33) + hop_data (650) + hmac (32) = 716 bytes
+		assert_eq!(encoded_trampoline_packet.len(), 716);
+
+		let msg = msgs::OutboundOnionPayload::TrampolineEntrypoint {
+			multipath_trampoline_data: None,
+			amt_to_forward: 0x0badf00d01020304,
+			outgoing_cltv_value: 0xffffffff,
+			trampoline_packet,
+		};
+		let encoded_payload = msg.encode();
+
+		// The Trampoline packet is carried as TLV type 20 within the outer onion payload; read
+		// the record's type and length back out of the serialized bytes at their expected offsets.
+		let trampoline_type_bytes = &encoded_payload[19..=19];
+		let mut trampoline_type_cursor = Cursor::new(trampoline_type_bytes);
+		let trampoline_type_big_size: BigSize = Readable::read(&mut trampoline_type_cursor).unwrap();
+		assert_eq!(trampoline_type_big_size.0, 20);
+
+		let trampoline_length_bytes = &encoded_payload[20..=22];
+		let mut trampoline_length_cursor = Cursor::new(trampoline_length_bytes);
+		let trampoline_length_big_size: BigSize = Readable::read(&mut trampoline_length_cursor).unwrap();
+		assert_eq!(trampoline_length_big_size.0, encoded_trampoline_packet.len() as u64);
+	}
+
+	#[test]
+	fn encoding_final_onion_hop_data_with_eclair_trampoline_packet() {
+		// Cross-implementation check: serialize a Trampoline onion packet built from an
+		// Eclair-generated vector and compare byte-for-byte against Eclair's encoding.
+		let public_key = PublicKey::from_slice(&<Vec<u8>>::from_hex("02eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619").unwrap()).unwrap();
+		let hop_data = <Vec<u8>>::from_hex("cff34152f3a36e52ca94e74927203a560392b9cc7ce3c45809c6be52166c24a595716880f95f178bf5b30ca63141f74db6e92795c6130877cfdac3d4bd3087ee73c65d627ddd709112a848cc99e303f3706509aa43ba7c8a88cba175fccf9a8f5016ef06d3b935dbb15196d7ce16dc1a7157845566901d7b2197e52cab4ce487014b14816e5805f9fcacb4f8f88b8ff176f1b94f6ce6b00bc43221130c17d20ef629db7c5f7eafaa166578c720619561dd14b3277db557ec7dcdb793771aef0f2f667cfdbeae3ac8d331c5994779dffb31e5fc0dbdedc0c592ca6d21c18e47fe3528d6975c19517d7e2ea8c5391cf17d0fe30c80913ed887234ccb48808f7ef9425bcd815c3b586210979e3bb286ef2851bf9ce04e28c40a203df98fd648d2f1936fd2f1def0e77eecb277229b4b682322371c0a1dbfcd723a991993df8cc1f2696b84b055b40a1792a29f710295a18fbd351b0f3ff34cd13941131b8278ba79303c89117120eea691738a9954908195143b039dbeed98f26a92585f3d15cf742c953799d3272e0545e9b744be9d3b4c").unwrap();
+		let hmac_vector = <Vec<u8>>::from_hex("bb079bfc4b35190eee9f59a1d7b41ba2f773179f322dafb4b1af900c289ebd6c").unwrap();
+		let mut hmac = [0; 32];
+		hmac.copy_from_slice(&hmac_vector);
+
+		let compressed_public_key = public_key.serialize();
+		assert_eq!(compressed_public_key.len(), 33);
+
+		let trampoline_packet = TrampolineOnionPacket {
+			version: 0,
+			public_key,
+			hop_data,
+			hmac,
+		};
+		let encoded_trampoline_packet = trampoline_packet.encode();
+		let expected_eclair_trampoline_packet = <Vec<u8>>::from_hex("0002eec7245d6b7d2ccb30380bfbe2a3648cd7a942653f5aa340edcea1f283686619cff34152f3a36e52ca94e74927203a560392b9cc7ce3c45809c6be52166c24a595716880f95f178bf5b30ca63141f74db6e92795c6130877cfdac3d4bd3087ee73c65d627ddd709112a848cc99e303f3706509aa43ba7c8a88cba175fccf9a8f5016ef06d3b935dbb15196d7ce16dc1a7157845566901d7b2197e52cab4ce487014b14816e5805f9fcacb4f8f88b8ff176f1b94f6ce6b00bc43221130c17d20ef629db7c5f7eafaa166578c720619561dd14b3277db557ec7dcdb793771aef0f2f667cfdbeae3ac8d331c5994779dffb31e5fc0dbdedc0c592ca6d21c18e47fe3528d6975c19517d7e2ea8c5391cf17d0fe30c80913ed887234ccb48808f7ef9425bcd815c3b586210979e3bb286ef2851bf9ce04e28c40a203df98fd648d2f1936fd2f1def0e77eecb277229b4b682322371c0a1dbfcd723a991993df8cc1f2696b84b055b40a1792a29f710295a18fbd351b0f3ff34cd13941131b8278ba79303c89117120eea691738a9954908195143b039dbeed98f26a92585f3d15cf742c953799d3272e0545e9b744be9d3b4cbb079bfc4b35190eee9f59a1d7b41ba2f773179f322dafb4b1af900c289ebd6c").unwrap();
+		assert_eq!(encoded_trampoline_packet, expected_eclair_trampoline_packet);
+	}
+
#[test]
fn query_channel_range_end_blocknum() {
let tests: Vec<(u32, u32, u32)> = vec![
//! Nodes without channels are disconnected and connected as needed to ensure that deterministic
//! blinded paths are used.
+use bitcoin::network::constants::Network;
use core::time::Duration;
use crate::blinded_path::BlindedPath;
use crate::events::{Event, MessageSendEventsProvider, PaymentPurpose};
// invoice contains the payment_hash but it was encrypted inside an onion message.
let amount_msats = invoice.amount_msats();
let payment_hash = invoice.payment_hash();
- do_pass_along_path(
- node, path, amount_msats, payment_hash, None, ev, false, false, None, false
- );
+ let args = PassAlongPathArgs::new(node, path, amount_msats, payment_hash, ev)
+ .without_clearing_recipient_events();
+ do_pass_along_path(args);
}
fn claim_bolt12_payment<'a, 'b, 'c>(node: &Node<'a, 'b, 'c>, path: &[&Node<'a, 'b, 'c>]) {
assert!(nodes[0].node.list_recent_payments().is_empty());
}
+/// Fails creating an invoice request when the offer contains an unsupported chain.
+#[test]
+fn fails_creating_invoice_request_for_unsupported_chain() {
+	// Two-node network with one announced channel between Alice and Bob.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+	let alice = &nodes[0];
+	let bob = &nodes[1];
+
+	// Alice builds an offer payable only on Signet; clear_chains() first removes
+	// the builder's default chain so Signet is the sole supported chain.
+	// NOTE(review): assumes the test harness nodes are not configured for Signet
+	// — confirm against the harness network defaults.
+	let offer = alice.node
+		.create_offer_builder("coffee".to_string()).unwrap()
+		.clear_chains()
+		.chain(Network::Signet)
+		.build().unwrap();
+
+	// Bob must refuse to build an invoice request for a chain he cannot pay on,
+	// surfacing UnsupportedChain rather than succeeding.
+	let payment_id = PaymentId([1; 32]);
+	match bob.node.pay_for_offer(&offer, None, None, None, payment_id, Retry::Attempts(0), None) {
+		Ok(_) => panic!("Expected error"),
+		Err(e) => assert_eq!(e, Bolt12SemanticError::UnsupportedChain),
+	}
+}
+
+/// Fails requesting a payment when the refund contains an unsupported chain.
+#[test]
+fn fails_sending_invoice_with_unsupported_chain_for_refund() {
+	// Two-node network with one announced channel between Alice and Bob.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 10_000_000, 1_000_000_000);
+
+	let alice = &nodes[0];
+	let bob = &nodes[1];
+
+	// Bob builds a refund pinned to Signet. The far-future expiry keeps the
+	// refund from expiring during the test.
+	// NOTE(review): assumes the test harness nodes are not configured for Signet
+	// — confirm against the harness network defaults.
+	let absolute_expiry = Duration::from_secs(u64::MAX);
+	let payment_id = PaymentId([1; 32]);
+	let refund = bob.node
+		.create_refund_builder(
+			"refund".to_string(), 10_000_000, absolute_expiry, payment_id, Retry::Attempts(0), None
+		)
+		.unwrap()
+		.chain(Network::Signet)
+		.build().unwrap();
+
+	// Alice must refuse to respond with an invoice for a chain she cannot
+	// receive on, surfacing UnsupportedChain rather than succeeding.
+	match alice.node.request_refund_payment(&refund) {
+		Ok(_) => panic!("Expected error"),
+		Err(e) => assert_eq!(e, Bolt12SemanticError::UnsupportedChain),
+	}
+}
+
/// Fails creating an invoice request when a blinded reply path cannot be created without exposing
/// the node's id.
#[test]
cltv_expiry_height, payment_metadata, false),
msgs::InboundOnionPayload::BlindedReceive {
sender_intended_htlc_amt_msat, total_msat, cltv_expiry_height, payment_secret,
- intro_node_blinding_point, payment_constraints, ..
+ intro_node_blinding_point, payment_constraints, keysend_preimage, ..
} => {
check_blinded_payment_constraints(
sender_intended_htlc_amt_msat, cltv_expiry, &payment_constraints
}
})?;
let payment_data = msgs::FinalOnionHopData { payment_secret, total_msat };
- (Some(payment_data), None, Vec::new(), sender_intended_htlc_amt_msat, cltv_expiry_height,
- None, intro_node_blinding_point.is_none())
+ (Some(payment_data), keysend_preimage, Vec::new(),
+ sender_intended_htlc_amt_msat, cltv_expiry_height, None,
+ intro_node_blinding_point.is_none())
}
msgs::InboundOnionPayload::Forward { .. } => {
return Err(InboundHTLCErr {
payment_metadata,
incoming_cltv_expiry: onion_cltv_expiry,
custom_tlvs,
+ requires_blinded_error,
}
} else if let Some(data) = payment_data {
PendingHTLCRouting::Receive {
cltv_expiry_height: cur_cltv + excess_final_cltv_expiry_delta,
encrypted_tlvs: blinded_hop.encrypted_payload.clone(),
intro_node_blinding_point: blinding_point.take(),
+ keysend_preimage: *keysend_preimage,
});
} else {
res.push(msgs::OutboundOnionPayload::BlindedForward {
assert_eq!(send_events.len(), 2);
let node_1_msgs = remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut send_events);
let node_2_msgs = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut send_events);
- do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_1_msgs, true, false, None, false);
- do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), node_2_msgs, true, false, None, false);
+ do_pass_along_path(PassAlongPathArgs::new(&nodes[0],&[&nodes[1], &nodes[3]], 15_000_000, payment_hash, node_1_msgs)
+ .with_payment_secret(payment_secret)
+ .without_clearing_recipient_events());
+ do_pass_along_path(PassAlongPathArgs::new(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, node_2_msgs)
+ .with_payment_secret(payment_secret)
+ .without_clearing_recipient_events());
// Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s
// monitors and ChannelManager, for use later, if we don't want to persist both monitors.
// Connect blocks on node B
connect_blocks(&nodes[1], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast!(nodes[1], true);
- check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 1000000);
+ check_closed_event!(nodes[1], 1, ClosureReason::HTLCsTimedOut, [nodes[0].node.get_our_node_id()], 1000000);
check_added_monitors!(nodes[1], 1);
// Verify node B broadcast 2 HTLC-timeout txn
let partial_claim_tx = {
connect_blocks(&nodes[0], TEST_FINAL_CLTV + LATENCY_GRACE_PERIOD_BLOCKS + 1);
check_closed_broadcast(&nodes[0], 1, true);
check_added_monitors(&nodes[0], 1);
- check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed, false, &[nodes[1].node.get_our_node_id()], 100_000);
+ check_closed_event(&nodes[0], 1, ClosureReason::HTLCsTimedOut, false, &[nodes[1].node.get_our_node_id()], 100_000);
{
let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
let (_, our_payment_hash, our_payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
let route_params = if blinded_recipient {
crate::ln::blinded_payment_tests::get_blinded_route_parameters(
- amt_msat, our_payment_secret,
+ amt_msat, our_payment_secret, 1, 100000000,
nodes.iter().skip(1).map(|n| n.node.get_our_node_id()).collect(), &[&chan_2.0.contents],
&chanmon_cfgs[2].keys_manager)
} else {
$return_value
}
+	// Test-only builder hook: drops the offer's chains TLV entirely so a test can
+	// then set an arbitrary single chain via `chain()`. Dead-code-allowed under
+	// c_bindings since not every bindings build exercises it.
+	#[cfg_attr(c_bindings, allow(dead_code))]
+	pub(crate) fn clear_chains($($self_mut)* $self: $self_type) -> $return_type {
+		$self.offer.chains = None;
+		$return_value
+	}
+
#[cfg_attr(c_bindings, allow(dead_code))]
pub(crate) fn clear_paths($($self_mut)* $self: $self_type) -> $return_type {
$self.offer.paths = None;
use crate::blinded_path::BlindedPath;
use crate::events::{Event, EventsProvider};
-use crate::ln::features::InitFeatures;
-use crate::ln::msgs::{self, DecodeError, OnionMessageHandler, SocketAddress};
+use crate::ln::features::{ChannelFeatures, InitFeatures};
+use crate::ln::msgs::{self, DecodeError, OnionMessageHandler};
+use crate::routing::gossip::{NetworkGraph, P2PGossipSync};
+use crate::routing::test_utils::{add_channel, add_or_update_node};
use crate::sign::{NodeSigner, Recipient};
use crate::util::ser::{FixedLengthReader, LengthReadable, Writeable, Writer};
use crate::util::test_utils;
-use super::messenger::{CustomOnionMessageHandler, Destination, MessageRouter, OnionMessagePath, OnionMessenger, PendingOnionMessage, SendError};
+use super::messenger::{CustomOnionMessageHandler, DefaultMessageRouter, Destination, OnionMessagePath, OnionMessenger, PendingOnionMessage, SendError};
use super::offers::{OffersMessage, OffersMessageHandler};
use super::packet::{OnionMessageContents, Packet};
use bitcoin::network::constants::Network;
use bitcoin::hashes::hex::FromHex;
-use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey, self};
+use bitcoin::secp256k1::{All, PublicKey, Secp256k1, SecretKey};
use crate::io;
use crate::io_extras::read_to_end;
use crate::sync::{Arc, Mutex};
+use core::ops::Deref;
+
use crate::prelude::*;
struct MessengerNode {
node_id: PublicKey,
+ privkey: SecretKey,
entropy_source: Arc<test_utils::TestKeysInterface>,
messenger: OnionMessenger<
Arc<test_utils::TestKeysInterface>,
Arc<test_utils::TestNodeSigner>,
Arc<test_utils::TestLogger>,
- Arc<TestMessageRouter>,
+ Arc<DefaultMessageRouter<
+ Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
+ Arc<test_utils::TestLogger>,
+ Arc<test_utils::TestKeysInterface>
+ >>,
Arc<TestOffersMessageHandler>,
Arc<TestCustomMessageHandler>
>,
custom_message_handler: Arc<TestCustomMessageHandler>,
-}
-
-struct TestMessageRouter {}
-
-impl MessageRouter for TestMessageRouter {
- fn find_path(
- &self, _sender: PublicKey, _peers: Vec<PublicKey>, destination: Destination
- ) -> Result<OnionMessagePath, ()> {
- Ok(OnionMessagePath {
- intermediate_nodes: vec![],
- destination,
- first_node_addresses:
- Some(vec![SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 }]),
- })
- }
-
- fn create_blinded_paths<
- T: secp256k1::Signing + secp256k1::Verification
- >(
- &self, _recipient: PublicKey, _peers: Vec<PublicKey>, _secp_ctx: &Secp256k1<T>,
- ) -> Result<Vec<BlindedPath>, ()> {
- unreachable!()
- }
+ gossip_sync: Arc<P2PGossipSync<
+ Arc<NetworkGraph<Arc<test_utils::TestLogger>>>,
+ Arc<test_utils::TestChainSource>,
+ Arc<test_utils::TestLogger>
+ >>
}
struct TestOffersMessageHandler {}
}
fn create_nodes_using_secrets(secrets: Vec<SecretKey>) -> Vec<MessengerNode> {
+ let gossip_logger = Arc::new(test_utils::TestLogger::with_id("gossip".to_string()));
+ let network_graph = Arc::new(NetworkGraph::new(Network::Testnet, gossip_logger.clone()));
+ let gossip_sync = Arc::new(
+ P2PGossipSync::new(network_graph.clone(), None, gossip_logger)
+ );
+
let mut nodes = Vec::new();
for (i, secret_key) in secrets.into_iter().enumerate() {
let logger = Arc::new(test_utils::TestLogger::with_id(format!("node {}", i)));
let entropy_source = Arc::new(test_utils::TestKeysInterface::new(&seed, Network::Testnet));
let node_signer = Arc::new(test_utils::TestNodeSigner::new(secret_key));
- let message_router = Arc::new(TestMessageRouter {});
+ let message_router = Arc::new(
+ DefaultMessageRouter::new(network_graph.clone(), entropy_source.clone())
+ );
let offers_message_handler = Arc::new(TestOffersMessageHandler {});
let custom_message_handler = Arc::new(TestCustomMessageHandler::new());
nodes.push(MessengerNode {
+ privkey: secret_key,
node_id: node_signer.get_node_id(Recipient::Node).unwrap(),
entropy_source: entropy_source.clone(),
messenger: OnionMessenger::new(
offers_message_handler, custom_message_handler.clone()
),
custom_message_handler,
+ gossip_sync: gossip_sync.clone(),
});
}
for i in 0..nodes.len() - 1 {
events.into_inner()
}
+/// Announces a channel between `node_a` and `node_b` (and both node
+/// announcements) into the shared test network graph, so that the
+/// `DefaultMessageRouter` can find onion-message paths through it.
+fn add_channel_to_graph(
+	node_a: &MessengerNode, node_b: &MessengerNode, secp_ctx: &Secp256k1<All>, short_channel_id: u64
+) {
+	// All nodes share one gossip sync (see create_nodes_using_secrets), so
+	// announcing through node_a's handle updates the graph for everyone.
+	let gossip_sync = node_a.gossip_sync.deref();
+	let privkey_a = &node_a.privkey;
+	let privkey_b = &node_b.privkey;
+	let channel_features = ChannelFeatures::empty();
+	// Advertise the features the messengers actually provide so the router
+	// considers these nodes onion-message capable.
+	let node_features_a = node_a.messenger.provided_node_features();
+	let node_features_b = node_b.messenger.provided_node_features();
+	add_channel(gossip_sync, secp_ctx, privkey_a, privkey_b, channel_features, short_channel_id);
+	add_or_update_node(gossip_sync, secp_ctx, privkey_a, node_features_a, 1);
+	add_or_update_node(gossip_sync, secp_ctx, privkey_b, node_features_b, 1);
+}
+
fn pass_along_path(path: &Vec<MessengerNode>) {
let mut prev_node = &path[0];
for node in path.into_iter().skip(1) {
let nodes = create_nodes(2);
let test_msg = TestCustomMessage::Response;
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::Node(nodes[1].node_id),
- first_node_addresses: None,
- };
- nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
+ let destination = Destination::Node(nodes[1].node_id);
+ nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap();
nodes[1].custom_message_handler.expect_message(TestCustomMessage::Response);
pass_along_path(&nodes);
}
destination: Destination::Node(nodes[2].node_id),
first_node_addresses: None,
};
+
nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
nodes[2].custom_message_handler.expect_message(TestCustomMessage::Response);
pass_along_path(&nodes);
let secp_ctx = Secp256k1::new();
let blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id], &*nodes[1].entropy_source, &secp_ctx).unwrap();
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::BlindedPath(blinded_path),
- first_node_addresses: None,
- };
- nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
+ let destination = Destination::BlindedPath(blinded_path);
+ nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap();
nodes[1].custom_message_handler.expect_message(TestCustomMessage::Response);
pass_along_path(&nodes);
}
let secp_ctx = Secp256k1::new();
let blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id, nodes[2].node_id, nodes[3].node_id], &*nodes[3].entropy_source, &secp_ctx).unwrap();
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::BlindedPath(blinded_path),
- first_node_addresses: None,
- };
+ let destination = Destination::BlindedPath(blinded_path);
- nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
+ nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap();
nodes[3].custom_message_handler.expect_message(TestCustomMessage::Response);
pass_along_path(&nodes);
}
let secp_ctx = Secp256k1::new();
let blinded_path = BlindedPath::new_for_message(&[nodes[0].node_id, nodes[1].node_id, nodes[2].node_id], &*nodes[2].entropy_source, &secp_ctx).unwrap();
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::BlindedPath(blinded_path),
- first_node_addresses: None,
- };
+ let destination = Destination::BlindedPath(blinded_path);
- nodes[0].messenger.send_onion_message_using_path(path, test_msg.clone(), None).unwrap();
+ nodes[0].messenger.send_onion_message(test_msg.clone(), destination, None).unwrap();
nodes[2].custom_message_handler.expect_message(TestCustomMessage::Response);
pass_along_path(&nodes);
// Try with a two-hop blinded path where we are the introduction node.
let blinded_path = BlindedPath::new_for_message(&[nodes[0].node_id, nodes[1].node_id], &*nodes[1].entropy_source, &secp_ctx).unwrap();
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::BlindedPath(blinded_path),
- first_node_addresses: None,
- };
- nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap();
+ let destination = Destination::BlindedPath(blinded_path);
+ nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap();
nodes[1].custom_message_handler.expect_message(TestCustomMessage::Response);
nodes.remove(2);
pass_along_path(&nodes);
let secp_ctx = Secp256k1::new();
let mut blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id, nodes[2].node_id], &*nodes[2].entropy_source, &secp_ctx).unwrap();
blinded_path.blinded_hops.clear();
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::BlindedPath(blinded_path),
- first_node_addresses: None,
- };
- let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg.clone(), None).unwrap_err();
+ let destination = Destination::BlindedPath(blinded_path);
+ let err = nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap_err();
assert_eq!(err, SendError::TooFewBlindedHops);
}
// Destination::BlindedPath
let blinded_path = BlindedPath::new_for_message(&[nodes[1].node_id, nodes[2].node_id, nodes[3].node_id], &*nodes[3].entropy_source, &secp_ctx).unwrap();
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::BlindedPath(blinded_path),
- first_node_addresses: None,
- };
+ let destination = Destination::BlindedPath(blinded_path);
let reply_path = BlindedPath::new_for_message(&[nodes[2].node_id, nodes[1].node_id, nodes[0].node_id], &*nodes[0].entropy_source, &secp_ctx).unwrap();
- nodes[0].messenger.send_onion_message_using_path(path, test_msg, Some(reply_path)).unwrap();
+ nodes[0].messenger.send_onion_message(test_msg, destination, Some(reply_path)).unwrap();
nodes[3].custom_message_handler.expect_message(TestCustomMessage::Request);
pass_along_path(&nodes);
}
let test_msg = InvalidCustomMessage {};
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::Node(nodes[1].node_id),
- first_node_addresses: None,
- };
- let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap_err();
+ let destination = Destination::Node(nodes[1].node_id);
+ let err = nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap_err();
assert_eq!(err, SendError::InvalidMessage);
}
fn peer_buffer_full() {
let nodes = create_nodes(2);
let test_msg = TestCustomMessage::Request;
- let path = OnionMessagePath {
- intermediate_nodes: vec![],
- destination: Destination::Node(nodes[1].node_id),
- first_node_addresses: None,
- };
+ let destination = Destination::Node(nodes[1].node_id);
for _ in 0..188 { // Based on MAX_PER_PEER_BUFFER_SIZE in OnionMessenger
- nodes[0].messenger.send_onion_message_using_path(path.clone(), test_msg.clone(), None).unwrap();
+ nodes[0].messenger.send_onion_message(test_msg.clone(), destination.clone(), None).unwrap();
}
- let err = nodes[0].messenger.send_onion_message_using_path(path, test_msg, None).unwrap_err();
+ let err = nodes[0].messenger.send_onion_message(test_msg, destination, None).unwrap_err();
assert_eq!(err, SendError::BufferFull);
}
let nodes = create_nodes(3);
let message = TestCustomMessage::Request;
let secp_ctx = Secp256k1::new();
+ add_channel_to_graph(&nodes[0], &nodes[1], &secp_ctx, 42);
+
let blinded_path = BlindedPath::new_for_message(
&[nodes[1].node_id, nodes[2].node_id], &*nodes[0].entropy_source, &secp_ctx
).unwrap();
let nodes = create_nodes(3);
let message = TestCustomMessage::Request;
let secp_ctx = Secp256k1::new();
+ add_channel_to_graph(&nodes[0], &nodes[1], &secp_ctx, 42);
+
let blinded_path = BlindedPath::new_for_message(
&[nodes[1].node_id, nodes[2].node_id], &*nodes[0].entropy_source, &secp_ctx
).unwrap();
use super::packet::ParsedOnionMessageContents;
use super::offers::OffersMessageHandler;
use super::packet::{BIG_PACKET_HOP_DATA_LEN, ForwardControlTlvs, Packet, Payload, ReceiveControlTlvs, SMALL_PACKET_HOP_DATA_LEN};
-use crate::util::logger::Logger;
+use crate::util::logger::{Logger, WithContext};
use crate::util::ser::Writeable;
use core::fmt;
ES::Target: EntropySource,
{
fn find_path(
- &self, _sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
+ &self, sender: PublicKey, peers: Vec<PublicKey>, destination: Destination
) -> Result<OnionMessagePath, ()> {
let first_node = destination.first_node();
- if peers.contains(&first_node) {
+ if peers.contains(&first_node) || sender == first_node {
Ok(OnionMessagePath {
intermediate_nodes: vec![], destination, first_node_addresses: None
})
&self, contents: T, destination: Destination, reply_path: Option<BlindedPath>,
log_suffix: fmt::Arguments
) -> Result<SendSuccess, SendError> {
+ let mut logger = WithContext::from(&self.logger, None, None);
let result = self.find_path(destination)
- .and_then(|path| self.enqueue_onion_message(path, contents, reply_path, log_suffix));
+ .and_then(|path| {
+ let first_hop = path.intermediate_nodes.get(0).map(|p| *p);
+ logger = WithContext::from(&self.logger, first_hop, None);
+ self.enqueue_onion_message(path, contents, reply_path, log_suffix)
+ });
match result.as_ref() {
Err(SendError::GetNodeIdFailed) => {
- log_warn!(self.logger, "Unable to retrieve node id {}", log_suffix);
+ log_warn!(logger, "Unable to retrieve node id {}", log_suffix);
},
Err(SendError::PathNotFound) => {
- log_trace!(self.logger, "Failed to find path {}", log_suffix);
+ log_trace!(logger, "Failed to find path {}", log_suffix);
},
Err(e) => {
- log_trace!(self.logger, "Failed sending onion message {}: {:?}", log_suffix, e);
+ log_trace!(logger, "Failed sending onion message {}: {:?}", log_suffix, e);
},
Ok(SendSuccess::Buffered) => {
- log_trace!(self.logger, "Buffered onion message {}", log_suffix);
+ log_trace!(logger, "Buffered onion message {}", log_suffix);
},
Ok(SendSuccess::BufferedAwaitingConnection(node_id)) => {
log_trace!(
- self.logger, "Buffered onion message waiting on peer connection {}: {:?}",
+ logger,
+ "Buffered onion message waiting on peer connection {}: {}",
log_suffix, node_id
);
},
OMH::Target: OffersMessageHandler,
CMH::Target: CustomOnionMessageHandler,
{
- fn handle_onion_message(&self, _peer_node_id: &PublicKey, msg: &OnionMessage) {
+ fn handle_onion_message(&self, peer_node_id: &PublicKey, msg: &OnionMessage) {
+ let logger = WithContext::from(&self.logger, Some(*peer_node_id), None);
match self.peel_onion_message(msg) {
Ok(PeeledOnion::Receive(message, path_id, reply_path)) => {
log_trace!(
- self.logger,
- "Received an onion message with path_id {:02x?} and {} reply_path: {:?}",
+ logger,
+ "Received an onion message with path_id {:02x?} and {} reply_path: {:?}",
path_id, if reply_path.is_some() { "a" } else { "no" }, message);
match message {
Ok(PeeledOnion::Forward(next_node_id, onion_message)) => {
let mut message_recipients = self.message_recipients.lock().unwrap();
if outbound_buffer_full(&next_node_id, &message_recipients) {
- log_trace!(self.logger, "Dropping forwarded onion message to peer {:?}: outbound buffer full", next_node_id);
+ log_trace!(
+ logger,
+ "Dropping forwarded onion message to peer {}: outbound buffer full",
+ next_node_id);
return
}
e.get(), OnionMessageRecipient::ConnectedPeer(..)
) => {
e.get_mut().enqueue_message(onion_message);
- log_trace!(self.logger, "Forwarding an onion message to peer {}", next_node_id);
+ log_trace!(logger, "Forwarding an onion message to peer {}", next_node_id);
},
_ => {
- log_trace!(self.logger, "Dropping forwarded onion message to disconnected peer {:?}", next_node_id);
+ log_trace!(
+ logger,
+ "Dropping forwarded onion message to disconnected peer {}",
+ next_node_id);
return
},
}
},
Err(e) => {
- log_error!(self.logger, "Failed to process onion message {:?}", e);
+ log_error!(logger, "Failed to process onion message {:?}", e);
}
}
}
NodeId(pubkey.serialize())
}
+	/// Create a new NodeId from a slice of bytes
+	///
+	/// Returns [`DecodeError::InvalidValue`] if the slice is not exactly
+	/// `PUBLIC_KEY_SIZE` bytes (the length of a compressed public key).
+	pub fn from_slice(bytes: &[u8]) -> Result<Self, DecodeError> {
+		if bytes.len() != PUBLIC_KEY_SIZE {
+			return Err(DecodeError::InvalidValue);
+		}
+		let mut data = [0; PUBLIC_KEY_SIZE];
+		data.copy_from_slice(bytes);
+		Ok(NodeId(data))
+	}
+
/// Get the public key slice from this NodeId
pub fn as_slice(&self) -> &[u8] {
&self.0
pub mod router;
pub mod scoring;
#[cfg(test)]
-mod test_utils;
+pub(crate) mod test_utils;
_ => None,
}
}
- fn blinded_route_hints(&self) -> &[(BlindedPayInfo, BlindedPath)] {
+ pub(crate) fn blinded_route_hints(&self) -> &[(BlindedPayInfo, BlindedPath)] {
match self {
Self::Blinded { route_hints, .. } => &route_hints[..],
Self::Clear { .. } => &[]
use crate::routing::gossip::{NetworkGraph, NodeAlias, P2PGossipSync};
use crate::ln::features::{ChannelFeatures, NodeFeatures};
-use crate::ln::msgs::{UnsignedChannelAnnouncement, ChannelAnnouncement, RoutingMessageHandler,
- NodeAnnouncement, UnsignedNodeAnnouncement, ChannelUpdate, UnsignedChannelUpdate, MAX_VALUE_MSAT};
+use crate::ln::msgs::{ChannelAnnouncement, ChannelUpdate, MAX_VALUE_MSAT, NodeAnnouncement, RoutingMessageHandler, SocketAddress, UnsignedChannelAnnouncement, UnsignedChannelUpdate, UnsignedNodeAnnouncement};
use crate::util::test_utils;
use crate::util::ser::Writeable;
use crate::routing::gossip::NodeId;
// Using the same keys for LN and BTC ids
-pub(super) fn add_channel(
+pub(crate) fn add_channel(
gossip_sync: &P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
secp_ctx: &Secp256k1<All>, node_1_privkey: &SecretKey, node_2_privkey: &SecretKey, features: ChannelFeatures, short_channel_id: u64
) {
};
}
-pub(super) fn add_or_update_node(
+pub(crate) fn add_or_update_node(
gossip_sync: &P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
secp_ctx: &Secp256k1<All>, node_privkey: &SecretKey, features: NodeFeatures, timestamp: u32
) {
node_id,
rgb: [0; 3],
alias: NodeAlias([0; 32]),
- addresses: Vec::new(),
+ addresses: vec![SocketAddress::TcpIpV4 { addr: [127, 0, 0, 1], port: 1000 }],
excess_address_data: Vec::new(),
excess_data: Vec::new(),
};
};
}
-pub(super) fn update_channel(
+pub(crate) fn update_channel(
gossip_sync: &P2PGossipSync<Arc<NetworkGraph<Arc<test_utils::TestLogger>>>, Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>,
secp_ctx: &Secp256k1<All>, node_privkey: &SecretKey, update: UnsignedChannelUpdate
) {
pub fn into_transaction(self) -> Transaction {
self.0
}
+
+	/// Returns a reference to the contained `Transaction`
+	///
+	/// Use [`Self::into_transaction`] instead to take ownership of the
+	/// transaction rather than borrowing it.
+	pub fn as_transaction(&self) -> &Transaction {
+		&self.0
+	}
}
impl Writeable for TransactionU16LenLimited {
--- /dev/null
+## Bug Fixes
+
+* LDK previously would fail to forward an intermediate blinded payment
+ if the blinded hop features were absent, potentially breaking
+ interoperability.
--- /dev/null
+## Bug fixes
+
+* LDK previously serialized `PaymentRelay::fee_base_msat` as a u32 when it
+ should have been serialized as a tu32. Similarly, we were serializing
+ `PaymentConstraints::htlc_minimum_msat` as a u64 when we should have been
+  serializing it as a tu64. This caused a lack of interoperability when using
+  other implementations as forwarding nodes along blinded payment paths.