msgs::DecodeError::InvalidValue => return,
msgs::DecodeError::BadLengthDescriptor => return,
msgs::DecodeError::ShortRead => panic!("We picked the length..."),
- msgs::DecodeError::Io(e) => panic!(format!("{}", e)),
+ msgs::DecodeError::Io(e) => panic!(format!("{:?}", e)),
}
}
}}
proportional_millionths: slice_to_be32(get_slice!(4)),
},
cltv_expiry_delta: slice_to_be16(get_slice!(2)),
- htlc_minimum_msat: slice_to_be64(get_slice!(8)),
+ htlc_minimum_msat: Some(slice_to_be64(get_slice!(8))),
+ htlc_maximum_msat: None,
});
}
}
use bitcoin::util::bip143;
use bitcoin::consensus::encode;
-use bitcoin::hashes::{Hash, HashEngine};
+use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
use ln::msgs;
use ln::msgs::{DecodeError, OptionalField, DataLossProtect};
use ln::channelmanager::{PendingHTLCStatus, HTLCSource, HTLCFailReason, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, PaymentPreimage, PaymentHash, BREAKDOWN_TIMEOUT, MAX_LOCAL_BREAKDOWN_TIMEOUT};
-use ln::chan_utils::{CounterpartyCommitmentSecrets, HolderCommitmentTransaction, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys, PreCalculatedTxCreationKeys};
+use ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor};
use ln::chan_utils;
use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER};
use chain::transaction::{OutPoint, TransactionData};
-use chain::keysinterface::{ChannelKeys, KeysInterface};
+use chain::keysinterface::{Sign, KeysInterface};
use util::transaction_utils;
-use util::ser::{Readable, Writeable, Writer};
+use util::ser::{Readable, ReadableArgs, Writeable, Writer, VecWriter};
use util::logger::Logger;
use util::errors::APIError;
use util::config::{UserConfig,ChannelConfig};
use std::default::Default;
use std::{cmp,mem,fmt};
use std::ops::Deref;
+#[cfg(any(test, feature = "fuzztarget"))]
+use std::sync::Mutex;
use bitcoin::hashes::hex::ToHex;
+use bitcoin::blockdata::opcodes::all::OP_PUSHBYTES_0;
#[cfg(test)]
pub struct ChannelValueStat {
/// anyway). That said, ChannelMonitor does this for us (see
/// ChannelMonitor::would_broadcast_at_height) so we actually remove the HTLC from our own
/// local state before then, once we're sure that the next commitment_signed and
- /// ChannelMonitor::provide_latest_local_commitment_tx_info will not include this HTLC.
+ /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
LocalRemoved(InboundHTLCRemovalReason),
}
const BOTH_SIDES_SHUTDOWN_MASK: u32 = ChannelState::LocalShutdownSent as u32 | ChannelState::RemoteShutdownSent as u32;
const MULTI_STATE_FLAGS: u32 = BOTH_SIDES_SHUTDOWN_MASK | ChannelState::PeerDisconnected as u32 | ChannelState::MonitorUpdateFailed as u32;
-const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
+pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
/// Liveness is called to fluctuate given peer disconnecton/monitor failures/closing.
/// If channel is public, network should have a liveness view announced by us on a
DisabledStaged,
}
+/// An enum indicating whether the local or remote side offered a given HTLC.
+enum HTLCInitiator {
+	/// The HTLC was offered by the local node.
+	LocalOffered,
+	/// The HTLC was offered by the remote node.
+	RemoteOffered,
+}
+
+/// Used when calculating whether we or the remote can afford an additional HTLC.
+struct HTLCCandidate {
+	/// The value of the candidate HTLC, in millisatoshis.
+	amount_msat: u64,
+	/// Which side (local or remote) offered this HTLC.
+	origin: HTLCInitiator,
+}
+
+impl HTLCCandidate {
+	/// Constructs a candidate HTLC from its amount (in millisatoshis) and the
+	/// side which offered it.
+	fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
+		Self {
+			amount_msat,
+			origin,
+		}
+	}
+}
+
// TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking
// has been completed, and then turn into a Channel to get compiler-time enforcement of things like
// calling channel_id() before we're set up or things like get_outbound_funding_signed on an
//
// Holder designates channel data owned for the benefice of the user client.
// Counterparty designates channel data owned by the another channel participant entity.
-pub(super) struct Channel<ChanSigner: ChannelKeys> {
+pub(super) struct Channel<Signer: Sign> {
config: ChannelConfig,
user_id: u64,
channel_id: [u8; 32],
channel_state: u32,
- channel_outbound: bool,
secp_ctx: Secp256k1<secp256k1::All>,
channel_value_satoshis: u64,
latest_monitor_update_id: u64,
- #[cfg(not(test))]
- holder_keys: ChanSigner,
- #[cfg(test)]
- pub(super) holder_keys: ChanSigner,
+ holder_signer: Signer,
shutdown_pubkey: PublicKey,
destination_script: Script,
last_sent_closing_fee: Option<(u32, u64, Signature)>, // (feerate, fee, holder_sig)
- funding_txo: Option<OutPoint>,
-
/// The hash of the block in which the funding transaction reached our CONF_TARGET. We use this
/// to detect unconfirmation after a serialize-unserialize roundtrip where we may not see a full
/// series of block_connected/block_disconnected calls. Obviously this is not a guarantee as we
// get_holder_selected_channel_reserve_satoshis(channel_value_sats: u64): u64
counterparty_htlc_minimum_msat: u64,
holder_htlc_minimum_msat: u64,
- counterparty_selected_contest_delay: u16,
- holder_selected_contest_delay: u16,
#[cfg(test)]
pub counterparty_max_accepted_htlcs: u16,
#[cfg(not(test))]
//implied by OUR_MAX_HTLCS: max_accepted_htlcs: u16,
minimum_depth: u32,
- counterparty_pubkeys: Option<ChannelPublicKeys>,
+ pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
counterparty_cur_commitment_point: Option<PublicKey>,
commitment_secrets: CounterpartyCommitmentSecrets,
network_sync: UpdateStatus,
+
+ // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
+ // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
+	// be, by comparing the cached values to the fee of the transaction generated by
+ // `build_commitment_transaction`.
+ #[cfg(any(test, feature = "fuzztarget"))]
+ next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
+ #[cfg(any(test, feature = "fuzztarget"))]
+ next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
+}
+
+/// Snapshot of the inputs to and result of a commitment-transaction fee
+/// calculation. Only present under test/fuzzing, where it is used to check that
+/// the fee-prediction helpers agree with the fee of the transaction actually
+/// generated by `build_commitment_transaction`.
+#[cfg(any(test, feature = "fuzztarget"))]
+struct CommitmentTxInfoCached {
+	/// The computed commitment transaction fee.
+	fee: u64,
+	/// Total number of pending HTLCs included in the computation.
+	total_pending_htlcs: usize,
+	next_holder_htlc_id: u64,
+	next_counterparty_htlc_id: u64,
+	/// The feerate the fee was computed at.
+	feerate: u32,
}
pub const OUR_MAX_HTLCS: u16 = 50; //TODO
};
}
-impl<ChanSigner: ChannelKeys> Channel<ChanSigner> {
+impl<Signer: Sign> Channel<Signer> {
// Convert constants + channel value to limits:
fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64) -> u64 {
channel_value_satoshis * 1000 / 10 //TODO
}
// Constructors:
- pub fn new_outbound<K: Deref, F: Deref>(fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, config: &UserConfig) -> Result<Channel<ChanSigner>, APIError>
- where K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+ pub fn new_outbound<K: Deref, F: Deref>(fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_id: u64, config: &UserConfig) -> Result<Channel<Signer>, APIError>
+ where K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator,
{
let holder_selected_contest_delay = config.own_channel_config.our_to_self_delay;
- let chan_keys = keys_provider.get_channel_keys(false, channel_value_satoshis);
+ let holder_signer = keys_provider.get_channel_signer(false, channel_value_satoshis);
+ let pubkeys = holder_signer.pubkeys().clone();
if channel_value_satoshis >= MAX_FUNDING_SATOSHIS {
return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than {}, it was {}", MAX_FUNDING_SATOSHIS, channel_value_satoshis)});
return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
}
let background_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
- if Channel::<ChanSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis) < Channel::<ChanSigner>::derive_holder_dust_limit_satoshis(background_feerate) {
+ if Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis) < Channel::<Signer>::derive_holder_dust_limit_satoshis(background_feerate) {
return Err(APIError::FeeRateTooHigh{err: format!("Not enough reserve above dust limit can be found at current fee rate({})", background_feerate), feerate: background_feerate});
}
let feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&keys_provider.get_secure_random_bytes());
+
Ok(Channel {
user_id,
config: config.channel_options.clone(),
channel_id: keys_provider.get_secure_random_bytes(),
channel_state: ChannelState::OurInitSent as u32,
- channel_outbound: true,
- secp_ctx: Secp256k1::new(),
+ secp_ctx,
channel_value_satoshis,
latest_monitor_update_id: 0,
- holder_keys: chan_keys,
+ holder_signer,
shutdown_pubkey: keys_provider.get_shutdown_pubkey(),
destination_script: keys_provider.get_destination_script(),
last_sent_closing_fee: None,
- funding_txo: None,
funding_tx_confirmed_in: None,
short_channel_id: None,
last_block_connected: Default::default(),
feerate_per_kw: feerate,
counterparty_dust_limit_satoshis: 0,
- holder_dust_limit_satoshis: Channel::<ChanSigner>::derive_holder_dust_limit_satoshis(background_feerate),
+ holder_dust_limit_satoshis: Channel::<Signer>::derive_holder_dust_limit_satoshis(background_feerate),
counterparty_max_htlc_value_in_flight_msat: 0,
counterparty_selected_channel_reserve_satoshis: 0,
counterparty_htlc_minimum_msat: 0,
holder_htlc_minimum_msat: if config.own_channel_config.our_htlc_minimum_msat == 0 { 1 } else { config.own_channel_config.our_htlc_minimum_msat },
- counterparty_selected_contest_delay: 0,
- holder_selected_contest_delay,
counterparty_max_accepted_htlcs: 0,
minimum_depth: 0, // Filled in in accept_channel
- counterparty_pubkeys: None,
+ channel_transaction_parameters: ChannelTransactionParameters {
+ holder_pubkeys: pubkeys,
+ holder_selected_contest_delay: config.own_channel_config.our_to_self_delay,
+ is_outbound_from_holder: true,
+ counterparty_parameters: None,
+ funding_outpoint: None
+ },
counterparty_cur_commitment_point: None,
counterparty_prev_commitment_point: None,
commitment_secrets: CounterpartyCommitmentSecrets::new(),
network_sync: UpdateStatus::Fresh,
+
+ #[cfg(any(test, feature = "fuzztarget"))]
+ next_local_commitment_tx_fee_info_cached: Mutex::new(None),
+ #[cfg(any(test, feature = "fuzztarget"))]
+ next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
})
}
/// Creates a new channel from a remote sides' request for one.
/// Assumes chain_hash has already been checked and corresponds with what we expect!
- pub fn new_from_req<K: Deref, F: Deref>(fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel, user_id: u64, config: &UserConfig) -> Result<Channel<ChanSigner>, ChannelError>
- where K::Target: KeysInterface<ChanKeySigner = ChanSigner>,
+ pub fn new_from_req<K: Deref, F: Deref>(fee_estimator: &F, keys_provider: &K, counterparty_node_id: PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel, user_id: u64, config: &UserConfig) -> Result<Channel<Signer>, ChannelError>
+ where K::Target: KeysInterface<Signer = Signer>,
F::Target: FeeEstimator
{
- let mut chan_keys = keys_provider.get_channel_keys(true, msg.funding_satoshis);
+ let holder_signer = keys_provider.get_channel_signer(true, msg.funding_satoshis);
+ let pubkeys = holder_signer.pubkeys().clone();
let counterparty_pubkeys = ChannelPublicKeys {
funding_pubkey: msg.funding_pubkey,
revocation_basepoint: msg.revocation_basepoint,
delayed_payment_basepoint: msg.delayed_payment_basepoint,
htlc_basepoint: msg.htlc_basepoint
};
- chan_keys.on_accept(&counterparty_pubkeys, msg.to_self_delay, config.own_channel_config.our_to_self_delay);
let mut local_config = (*config).channel_options.clone();
if config.own_channel_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
if msg.htlc_minimum_msat >= full_channel_value_msat {
return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
}
- Channel::<ChanSigner>::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
+ Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
let max_counterparty_selected_contest_delay = u16::min(config.peer_channel_config_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
if msg.to_self_delay > max_counterparty_selected_contest_delay {
if msg.max_accepted_htlcs < 1 {
return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
}
- if msg.max_accepted_htlcs > 483 {
- return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than 483", msg.max_accepted_htlcs)));
+ if msg.max_accepted_htlcs > MAX_HTLCS {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
}
// Now check against optional parameters as set by config...
let background_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
- let holder_dust_limit_satoshis = Channel::<ChanSigner>::derive_holder_dust_limit_satoshis(background_feerate);
- let holder_selected_channel_reserve_satoshis = Channel::<ChanSigner>::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis);
+ let holder_dust_limit_satoshis = Channel::<Signer>::derive_holder_dust_limit_satoshis(background_feerate);
+ let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis);
if holder_selected_channel_reserve_satoshis < holder_dust_limit_satoshis {
return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, holder_dust_limit_satoshis)));
}
let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
match &msg.shutdown_scriptpubkey {
&OptionalField::Present(ref script) => {
- // Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. We enforce it while receiving shutdown msg
- if script.is_p2pkh() || script.is_p2sh() || script.is_v0_p2wsh() || script.is_v0_p2wpkh() {
- Some(script.clone())
// Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
- } else if script.len() == 0 {
+ if script.len() == 0 {
None
// Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. Fail the channel
- } else {
+ } else if is_unsupported_shutdown_script(&their_features, script) {
return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format. script: ({})", script.to_bytes().to_hex())));
+ } else {
+ Some(script.clone())
}
},
// Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
}
} else { None };
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&keys_provider.get_secure_random_bytes());
+
let chan = Channel {
user_id,
config: local_config,
channel_id: msg.temporary_channel_id,
channel_state: (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32),
- channel_outbound: false,
- secp_ctx: Secp256k1::new(),
+ secp_ctx,
latest_monitor_update_id: 0,
- holder_keys: chan_keys,
+ holder_signer,
shutdown_pubkey: keys_provider.get_shutdown_pubkey(),
destination_script: keys_provider.get_destination_script(),
last_sent_closing_fee: None,
- funding_txo: None,
funding_tx_confirmed_in: None,
short_channel_id: None,
last_block_connected: Default::default(),
counterparty_selected_channel_reserve_satoshis: msg.channel_reserve_satoshis,
counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
holder_htlc_minimum_msat: if config.own_channel_config.our_htlc_minimum_msat == 0 { 1 } else { config.own_channel_config.our_htlc_minimum_msat },
- counterparty_selected_contest_delay: msg.to_self_delay,
- holder_selected_contest_delay: config.own_channel_config.our_to_self_delay,
counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
minimum_depth: config.own_channel_config.minimum_depth,
- counterparty_pubkeys: Some(counterparty_pubkeys),
+ channel_transaction_parameters: ChannelTransactionParameters {
+ holder_pubkeys: pubkeys,
+ holder_selected_contest_delay: config.own_channel_config.our_to_self_delay,
+ is_outbound_from_holder: false,
+ counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
+ selected_contest_delay: msg.to_self_delay,
+ pubkeys: counterparty_pubkeys,
+ }),
+ funding_outpoint: None
+ },
counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
counterparty_prev_commitment_point: None,
commitment_secrets: CounterpartyCommitmentSecrets::new(),
network_sync: UpdateStatus::Fresh,
+
+ #[cfg(any(test, feature = "fuzztarget"))]
+ next_local_commitment_tx_fee_info_cached: Mutex::new(None),
+ #[cfg(any(test, feature = "fuzztarget"))]
+ next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
};
Ok(chan)
}
- // Utilities to build transactions:
-
- fn get_commitment_transaction_number_obscure_factor(&self) -> u64 {
- let mut sha = Sha256::engine();
-
- let counterparty_payment_point = &self.counterparty_pubkeys.as_ref().unwrap().payment_point.serialize();
- if self.channel_outbound {
- sha.input(&self.holder_keys.pubkeys().payment_point.serialize());
- sha.input(counterparty_payment_point);
- } else {
- sha.input(counterparty_payment_point);
- sha.input(&self.holder_keys.pubkeys().payment_point.serialize());
- }
- let res = Sha256::from_engine(sha).into_inner();
-
- ((res[26] as u64) << 5*8) |
- ((res[27] as u64) << 4*8) |
- ((res[28] as u64) << 3*8) |
- ((res[29] as u64) << 2*8) |
- ((res[30] as u64) << 1*8) |
- ((res[31] as u64) << 0*8)
- }
-
/// Transaction nomenclature is somewhat confusing here as there are many different cases - a
/// transaction is referred to as "a's transaction" implying that a will be able to broadcast
/// the transaction. Thus, b will generally be sending a signature over such a transaction to
/// have not yet committed it. Such HTLCs will only be included in transactions which are being
/// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
/// which peer generated this transaction and "to whom" this transaction flows.
- /// Returns (the transaction built, the number of HTLC outputs which were present in the
+ /// Returns (the transaction info, the number of HTLC outputs which were present in the
/// transaction, the list of HTLCs which were not ignored when building the transaction).
/// Note that below-dust HTLCs are included in the third return value, but not the second, and
/// sources are provided only for outbound HTLCs in the third return value.
#[inline]
- fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, feerate_per_kw: u32, logger: &L) -> (Transaction, usize, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
- let obscured_commitment_transaction_number = self.get_commitment_transaction_number_obscure_factor() ^ (INITIAL_COMMITMENT_NUMBER - commitment_number);
-
- let txins = {
- let mut ins: Vec<TxIn> = Vec::new();
- ins.push(TxIn {
- previous_output: self.funding_txo.unwrap().into_bitcoin_outpoint(),
- script_sig: Script::new(),
- sequence: ((0x80 as u32) << 8*3) | ((obscured_commitment_transaction_number >> 3*8) as u32),
- witness: Vec::new(),
- });
- ins
- };
-
- let mut txouts: Vec<(TxOut, Option<(HTLCOutputInCommitment, Option<&HTLCSource>)>)> = Vec::with_capacity(self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len() + 2);
+ fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, feerate_per_kw: u32, logger: &L) -> (CommitmentTransaction, usize, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger {
let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
+ let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
+ let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
let mut remote_htlc_total_msat = 0;
let mut local_htlc_total_msat = 0;
let mut value_to_self_msat_offset = 0;
- log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for {}, generated by {} with fee {}...", commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), self.get_commitment_transaction_number_obscure_factor(), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
+ log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for {}, generated by {} with fee {}...", commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
macro_rules! get_htlc_in_commitment {
($htlc: expr, $offered: expr) => {
let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + (feerate_per_kw as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) {
log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
- txouts.push((TxOut {
- script_pubkey: chan_utils::get_htlc_redeemscript(&htlc_in_tx, &keys).to_v0_p2wsh(),
- value: $htlc.amount_msat / 1000
- }, Some((htlc_in_tx, $source))));
+ included_non_dust_htlcs.push((htlc_in_tx, $source));
} else {
log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
included_dust_htlcs.push((htlc_in_tx, $source));
let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + (feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) {
log_trace!(logger, " ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
- txouts.push((TxOut { // "received HTLC output"
- script_pubkey: chan_utils::get_htlc_redeemscript(&htlc_in_tx, &keys).to_v0_p2wsh(),
- value: $htlc.amount_msat / 1000
- }, Some((htlc_in_tx, $source))));
+ included_non_dust_htlcs.push((htlc_in_tx, $source));
} else {
log_trace!(logger, " ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, log_bytes!($htlc.payment_hash.0), $htlc.amount_msat);
included_dust_htlcs.push((htlc_in_tx, $source));
};
debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis as i64);
broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
- debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= Channel::<ChanSigner>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) as i64);
+ debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) as i64);
broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
}
- let total_fee = feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + (txouts.len() as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
- let (value_to_self, value_to_remote) = if self.channel_outbound {
+ let total_fee = feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + (included_non_dust_htlcs.len() as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
+ let (value_to_self, value_to_remote) = if self.is_outbound() {
(value_to_self_msat / 1000 - total_fee as i64, value_to_remote_msat / 1000)
} else {
(value_to_self_msat / 1000, value_to_remote_msat / 1000 - total_fee as i64)
};
- let value_to_a = if local { value_to_self } else { value_to_remote };
- let value_to_b = if local { value_to_remote } else { value_to_self };
+ let mut value_to_a = if local { value_to_self } else { value_to_remote };
+ let mut value_to_b = if local { value_to_remote } else { value_to_self };
if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
log_trace!(logger, " ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
- txouts.push((TxOut {
- script_pubkey: chan_utils::get_revokeable_redeemscript(&keys.revocation_key,
- if local { self.counterparty_selected_contest_delay } else { self.holder_selected_contest_delay },
- &keys.broadcaster_delayed_payment_key).to_v0_p2wsh(),
- value: value_to_a as u64
- }, None));
+ } else {
+ value_to_a = 0;
}
if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
log_trace!(logger, " ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
- let static_payment_pk = if local {
- self.counterparty_pubkeys.as_ref().unwrap().payment_point
- } else {
- self.holder_keys.pubkeys().payment_point
- }.serialize();
- txouts.push((TxOut {
- script_pubkey: Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0)
- .push_slice(&WPubkeyHash::hash(&static_payment_pk)[..])
- .into_script(),
- value: value_to_b as u64
- }, None));
- }
-
- transaction_utils::sort_outputs(&mut txouts, |a, b| {
- if let &Some(ref a_htlc) = a {
- if let &Some(ref b_htlc) = b {
- a_htlc.0.cltv_expiry.cmp(&b_htlc.0.cltv_expiry)
- // Note that due to hash collisions, we have to have a fallback comparison
- // here for fuzztarget mode (otherwise at least chanmon_fail_consistency
- // may fail)!
- .then(a_htlc.0.payment_hash.0.cmp(&b_htlc.0.payment_hash.0))
- // For non-HTLC outputs, if they're copying our SPK we don't really care if we
- // close the channel due to mismatches - they're doing something dumb:
- } else { cmp::Ordering::Equal }
- } else { cmp::Ordering::Equal }
- });
-
- let mut outputs: Vec<TxOut> = Vec::with_capacity(txouts.len());
- let mut htlcs_included: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(txouts.len() + included_dust_htlcs.len());
- for (idx, mut out) in txouts.drain(..).enumerate() {
- outputs.push(out.0);
- if let Some((mut htlc, source_option)) = out.1.take() {
- htlc.transaction_output_index = Some(idx as u32);
- htlcs_included.push((htlc, source_option));
- }
+ } else {
+ value_to_b = 0;
}
- let non_dust_htlc_count = htlcs_included.len();
+
+ let num_nondust_htlcs = included_non_dust_htlcs.len();
+
+ let channel_parameters =
+ if local { self.channel_transaction_parameters.as_holder_broadcastable() }
+ else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
+ let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
+ value_to_a as u64,
+ value_to_b as u64,
+ keys.clone(),
+ feerate_per_kw,
+ &mut included_non_dust_htlcs,
+ &channel_parameters
+ );
+ let mut htlcs_included = included_non_dust_htlcs;
+ // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
+ htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
htlcs_included.append(&mut included_dust_htlcs);
- (Transaction {
- version: 2,
- lock_time: ((0x20 as u32) << 8*3) | ((obscured_commitment_transaction_number & 0xffffffu64) as u32),
- input: txins,
- output: outputs,
- }, non_dust_htlc_count, htlcs_included)
+ (tx, num_nondust_htlcs, htlcs_included)
}
#[inline]
let txins = {
let mut ins: Vec<TxIn> = Vec::new();
ins.push(TxIn {
- previous_output: self.funding_txo.unwrap().into_bitcoin_outpoint(),
+ previous_output: self.funding_outpoint().into_bitcoin_outpoint(),
script_sig: Script::new(),
sequence: 0xffffffff,
witness: Vec::new(),
let mut txouts: Vec<(TxOut, ())> = Vec::new();
let mut total_fee_satoshis = proposed_total_fee_satoshis;
- let value_to_self: i64 = (self.value_to_self_msat as i64) / 1000 - if self.channel_outbound { total_fee_satoshis as i64 } else { 0 };
- let value_to_remote: i64 = ((self.channel_value_satoshis * 1000 - self.value_to_self_msat) as i64 / 1000) - if self.channel_outbound { 0 } else { total_fee_satoshis as i64 };
+ let value_to_self: i64 = (self.value_to_self_msat as i64) / 1000 - if self.is_outbound() { total_fee_satoshis as i64 } else { 0 };
+ let value_to_remote: i64 = ((self.channel_value_satoshis * 1000 - self.value_to_self_msat) as i64 / 1000) - if self.is_outbound() { 0 } else { total_fee_satoshis as i64 };
if value_to_self < 0 {
- assert!(self.channel_outbound);
+ assert!(self.is_outbound());
total_fee_satoshis += (-value_to_self) as u64;
} else if value_to_remote < 0 {
- assert!(!self.channel_outbound);
+ assert!(!self.is_outbound());
total_fee_satoshis += (-value_to_remote) as u64;
}
}, total_fee_satoshis)
}
+	/// Returns the channel's funding outpoint.
+	///
+	/// Panics if `channel_transaction_parameters.funding_outpoint` is `None`,
+	/// i.e. if it has not yet been filled in — presumably before funding is
+	/// negotiated; TODO confirm against callers.
+	fn funding_outpoint(&self) -> OutPoint {
+		self.channel_transaction_parameters.funding_outpoint.unwrap()
+	}
+
#[inline]
/// Creates a set of keys for build_commitment_transaction to generate a transaction which our
/// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
/// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
/// TODO Some magic rust shit to compile-time check this?
fn build_holder_transaction_keys(&self, commitment_number: u64) -> Result<TxCreationKeys, ChannelError> {
- let per_commitment_point = self.holder_keys.get_per_commitment_point(commitment_number, &self.secp_ctx);
- let delayed_payment_base = &self.holder_keys.pubkeys().delayed_payment_basepoint;
- let htlc_basepoint = &self.holder_keys.pubkeys().htlc_basepoint;
- let counterparty_pubkeys = self.counterparty_pubkeys.as_ref().unwrap();
+ let per_commitment_point = self.holder_signer.get_per_commitment_point(commitment_number, &self.secp_ctx);
+ let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
+ let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
+ let counterparty_pubkeys = self.get_counterparty_pubkeys();
Ok(secp_check!(TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint), "Local tx keys generation got bogus keys".to_owned()))
}
fn build_remote_transaction_keys(&self) -> Result<TxCreationKeys, ChannelError> {
//TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
//may see payments to it!
- let revocation_basepoint = &self.holder_keys.pubkeys().revocation_basepoint;
- let htlc_basepoint = &self.holder_keys.pubkeys().htlc_basepoint;
- let counterparty_pubkeys = self.counterparty_pubkeys.as_ref().unwrap();
+ // Same accessor refactor as build_holder_transaction_keys: basepoints come from
+ // get_holder_pubkeys()/get_counterparty_pubkeys(). Note the broadcaster here is the
+ // counterparty (their per-commitment point and delayed-payment basepoint are used).
+ let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
+ let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
+ let counterparty_pubkeys = self.get_counterparty_pubkeys();
Ok(secp_check!(TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint), "Remote tx keys generation got bogus keys".to_owned()))
}
/// pays to get_funding_redeemscript().to_v0_p2wsh()).
/// Panics if called before accept_channel/new_from_req
pub fn get_funding_redeemscript(&self) -> Script {
- make_funding_redeemscript(&self.holder_keys.pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
+ // Builds the 2-of-2 multisig redeemscript from our and the counterparty's funding keys.
+ make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
}
/// Builds the htlc-success or htlc-timeout transaction which spends a given HTLC output
/// @local is used only to convert relevant internal structures which refer to remote vs local
/// to decide value of outputs and direction of HTLCs.
fn build_htlc_transaction(&self, prev_hash: &Txid, htlc: &HTLCOutputInCommitment, local: bool, keys: &TxCreationKeys, feerate_per_kw: u32) -> Transaction {
- chan_utils::build_htlc_transaction(prev_hash, feerate_per_kw, if local { self.counterparty_selected_contest_delay } else { self.holder_selected_contest_delay }, htlc, &keys.broadcaster_delayed_payment_key, &keys.revocation_key)
+ // The contest delay on the second-stage tx is the one the *non-broadcaster* selected:
+ // for our ("local") commitment tx that is the counterparty-selected delay, and vice versa.
+ chan_utils::build_htlc_transaction(prev_hash, feerate_per_kw, if local { self.get_counterparty_selected_contest_delay() } else { self.get_holder_selected_contest_delay() }, htlc, &keys.broadcaster_delayed_payment_key, &keys.revocation_key)
}
/// Per HTLC, only one get_update_fail_htlc or get_update_fulfill_htlc call may be made.
pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, config: &UserConfig, their_features: InitFeatures) -> Result<(), ChannelError> {
// Check sanity of message fields:
- if !self.channel_outbound {
+ if !self.is_outbound() {
+ // accept_channel is only ever a response to our open_channel, so we must be outbound.
return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
}
if self.channel_state != ChannelState::OurInitSent as u32 {
if msg.channel_reserve_satoshis < self.holder_dust_limit_satoshis {
return Err(ChannelError::Close(format!("Peer never wants payout outputs? channel_reserve_satoshis was ({}). dust_limit is ({})", msg.channel_reserve_satoshis, self.holder_dust_limit_satoshis)));
}
- let remote_reserve = Channel::<ChanSigner>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis);
+ let remote_reserve = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis);
if msg.dust_limit_satoshis > remote_reserve {
return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, remote_reserve)));
}
if msg.max_accepted_htlcs < 1 {
return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
}
- if msg.max_accepted_htlcs > 483 {
- return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than 483", msg.max_accepted_htlcs)));
+ // Replaces the magic number 483 with the MAX_HTLCS constant (the BOLT 2 protocol cap).
+ if msg.max_accepted_htlcs > MAX_HTLCS {
+ return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
}
// Now check against optional parameters as set by config...
let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
match &msg.shutdown_scriptpubkey {
&OptionalField::Present(ref script) => {
- // Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. We enforce it while receiving shutdown msg
- if script.is_p2pkh() || script.is_p2sh() || script.is_v0_p2wsh() || script.is_v0_p2wpkh() {
- Some(script.clone())
// Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
- } else if script.len() == 0 {
+ if script.len() == 0 {
None
// Peer is signaling upfront_shutdown and has provided a non-accepted scriptpubkey format. Fail the channel
+ // Validation is delegated to is_unsupported_shutdown_script (feature-dependent),
+ // replacing the previous hard-coded p2pkh/p2sh/p2wsh/p2wpkh allow-list.
+ } else if is_unsupported_shutdown_script(&their_features, script) {
+ return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format. script: ({})", script.to_bytes().to_hex())));
} else {
- return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format. scriptpubkey: ({})", script.to_bytes().to_hex())));
+ Some(script.clone())
}
},
// Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
self.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.channel_value_satoshis * 1000);
self.counterparty_selected_channel_reserve_satoshis = msg.channel_reserve_satoshis;
self.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
- self.counterparty_selected_contest_delay = msg.to_self_delay;
self.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
self.minimum_depth = msg.minimum_depth;
htlc_basepoint: msg.htlc_basepoint
};
- self.holder_keys.on_accept(&counterparty_pubkeys, msg.to_self_delay, self.holder_selected_contest_delay);
- self.counterparty_pubkeys = Some(counterparty_pubkeys);
+ // The counterparty's contest delay and pubkeys now live together inside
+ // channel_transaction_parameters rather than as separate Channel fields, and the
+ // signer is no longer notified here (the old on_accept call is removed).
+ self.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
+ selected_contest_delay: msg.to_self_delay,
+ pubkeys: counterparty_pubkeys,
+ });
self.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
self.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
Ok(())
}
- fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(Transaction, HolderCommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
+ // Return type changes: instead of a raw Transaction plus a HolderCommitmentTransaction,
+ // we now return the counterparty's initial commitment txid, the (unsigned)
+ // CommitmentTransaction for our initial commitment, and our signature on theirs.
+ fn funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<(Txid, CommitmentTransaction, Signature), ChannelError> where L::Target: Logger {
let funding_script = self.get_funding_redeemscript();
let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?;
let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, self.feerate_per_kw, logger).0;
- let sighash = hash_to_message!(&bip143::SigHashCache::new(&initial_commitment_tx).signature_hash(0, &funding_script, self.channel_value_satoshis, SigHashType::All)[..]);
-
- // They sign the "our" commitment transaction...
- log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&initial_commitment_tx), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script));
- secp_check!(self.secp_ctx.verify(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
-
- let tx = HolderCommitmentTransaction::new_missing_holder_sig(initial_commitment_tx, sig.clone(), &self.holder_keys.pubkeys().funding_pubkey, self.counterparty_funding_pubkey(), keys, self.feerate_per_kw, Vec::new());
+ {
+ // Scoped so the trusted/built views are dropped before initial_commitment_tx is moved
+ // into the return value. Sighash computation moves from manual bip143 hashing to the
+ // BuiltCommitmentTransaction::get_sighash_all helper.
+ let trusted_tx = initial_commitment_tx.trust();
+ let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+ let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);
+ // They sign the holder commitment transaction...
+ log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script));
+ secp_check!(self.secp_ctx.verify(&sighash, &sig, self.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
+ }
let counterparty_keys = self.build_remote_transaction_keys()?;
let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, self.feerate_per_kw, logger).0;
- let pre_remote_keys = PreCalculatedTxCreationKeys::new(counterparty_keys);
- let counterparty_signature = self.holder_keys.sign_counterparty_commitment(self.feerate_per_kw, &counterparty_initial_commitment_tx, &pre_remote_keys, &Vec::new(), &self.secp_ctx)
+
+ let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+ let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
+ log_trace!(logger, "Initial counterparty ID {} tx {}", counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
+
+ // The new Sign::sign_counterparty_commitment takes the structured CommitmentTransaction
+ // directly, so the feerate/keys/htlc arguments of the old signer API are gone.
+ let counterparty_signature = self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, &self.secp_ctx)
.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0;
// We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
- Ok((counterparty_initial_commitment_tx, tx, counterparty_signature))
+ Ok((counterparty_initial_bitcoin_tx.txid, initial_commitment_tx, counterparty_signature))
}
fn counterparty_funding_pubkey(&self) -> &PublicKey {
- &self.counterparty_pubkeys.as_ref().expect("funding_pubkey() only allowed after accept_channel").funding_pubkey
+ // get_counterparty_pubkeys() subsumes the old expect(); still panics if called before
+ // the counterparty's parameters are known (i.e. before accept_channel/open_channel).
+ &self.get_counterparty_pubkeys().funding_pubkey
}
- pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<ChanSigner>), ChannelError> where L::Target: Logger {
- if self.channel_outbound {
+ pub fn funding_created<L: Deref>(&mut self, msg: &msgs::FundingCreated, logger: &L) -> Result<(msgs::FundingSigned, ChannelMonitor<Signer>), ChannelError> where L::Target: Logger {
+ if self.is_outbound() {
+ // funding_created is only sent by the funder, so we must be the inbound side.
return Err(ChannelError::Close("Received funding_created for an outbound channel?".to_owned()));
}
if self.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
}
- let funding_txo = OutPoint{ txid: msg.funding_txid, index: msg.funding_output_index };
- self.funding_txo = Some(funding_txo.clone());
+ // The funding outpoint now lives inside channel_transaction_parameters instead of a
+ // dedicated self.funding_txo field.
+ let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
+ self.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
+ // This is an externally observable change before we finish all our checks. In particular
+ // funding_created_signature may fail.
+ self.holder_signer.ready_channel(&self.channel_transaction_parameters);
- let (counterparty_initial_commitment_tx, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
+ let (counterparty_initial_commitment_txid, initial_commitment_tx, signature) = match self.funding_created_signature(&msg.signature, logger) {
Ok(res) => res,
+ Err(ChannelError::Close(e)) => {
+ // Roll back the externally observable outpoint assignment made above.
+ self.channel_transaction_parameters.funding_outpoint = None;
+ return Err(ChannelError::Close(e));
+ },
Err(e) => {
- self.funding_txo = None;
- return Err(e);
+ // The only error we know how to handle is ChannelError::Close, so we fall over here
+ // to make sure we don't continue with an inconsistent state.
+ panic!("unexpected error type from funding_created_signature {:?}", e);
}
};
+ // Pair our initial commitment tx with the counterparty's signature (our own signature
+ // and any HTLC signatures are not yet present — the HTLC list is empty at funding).
+ let holder_commitment_tx = HolderCommitmentTransaction::new(
+ initial_commitment_tx,
+ msg.signature,
+ Vec::new(),
+ &self.get_holder_pubkeys().funding_pubkey,
+ self.counterparty_funding_pubkey()
+ );
+
// Now that we're past error-generating stuff, update our local state:
- let counterparty_pubkeys = self.counterparty_pubkeys.as_ref().unwrap();
let funding_redeemscript = self.get_funding_redeemscript();
let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
- macro_rules! create_monitor {
- () => { {
- let mut channel_monitor = ChannelMonitor::new(self.holder_keys.clone(),
- &self.shutdown_pubkey, self.holder_selected_contest_delay,
- &self.destination_script, (funding_txo, funding_txo_script.clone()),
- &counterparty_pubkeys.htlc_basepoint, &counterparty_pubkeys.delayed_payment_basepoint,
- self.counterparty_selected_contest_delay, funding_redeemscript.clone(), self.channel_value_satoshis,
- self.get_commitment_transaction_number_obscure_factor(),
- initial_commitment_tx.clone());
-
- channel_monitor.provide_latest_counterparty_commitment_tx_info(&counterparty_initial_commitment_tx, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);
- channel_monitor
- } }
- }
+ // Obscure factor computation is now a free function keyed off both payment points and
+ // which side funded the channel, replacing the method on Channel.
+ let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound());
+ let channel_monitor = ChannelMonitor::new(self.secp_ctx.clone(), self.holder_signer.clone(),
+ &self.shutdown_pubkey, self.get_holder_selected_contest_delay(),
+ &self.destination_script, (funding_txo, funding_txo_script.clone()),
+ &self.channel_transaction_parameters,
+ funding_redeemscript.clone(), self.channel_value_satoshis,
+ obscure_factor,
+ holder_commitment_tx);
- let channel_monitor = create_monitor!();
+ // NOTE(review): channel_monitor is bound non-mut; confirm
+ // provide_latest_counterparty_commitment_tx takes &self (interior mutability) —
+ // otherwise this needs `let mut channel_monitor`.
+ channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_commitment_txid, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);
self.channel_state = ChannelState::FundingSent as u32;
self.channel_id = funding_txo.to_channel_id();
/// Handles a funding_signed message from the remote end.
/// If this call is successful, broadcast the funding transaction (and not before!)
- pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, logger: &L) -> Result<ChannelMonitor<ChanSigner>, ChannelError> where L::Target: Logger {
- if !self.channel_outbound {
+ pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, logger: &L) -> Result<ChannelMonitor<Signer>, ChannelError> where L::Target: Logger {
+ if !self.is_outbound() {
+ // funding_signed is only sent to the funder, so we must be the outbound side.
return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
}
if self.channel_state & !(ChannelState::MonitorUpdateFailed as u32) != ChannelState::FundingCreated as u32 {
let counterparty_keys = self.build_remote_transaction_keys()?;
let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, self.feerate_per_kw, logger).0;
+ let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
+ let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
- let holder_keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?;
- let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_keys, true, false, self.feerate_per_kw, logger).0;
- let sighash = hash_to_message!(&bip143::SigHashCache::new(&initial_commitment_tx).signature_hash(0, &funding_script, self.channel_value_satoshis, SigHashType::All)[..]);
-
- let counterparty_funding_pubkey = &self.counterparty_pubkeys.as_ref().unwrap().funding_pubkey;
+ log_trace!(logger, "Initial counterparty ID {} tx {}", counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
- // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
- if let Err(_) = self.secp_ctx.verify(&sighash, &msg.signature, counterparty_funding_pubkey) {
- return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
+ let holder_signer = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?;
+ let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, self.feerate_per_kw, logger).0;
+ {
+ // Scoped so the trusted/built views drop before initial_commitment_tx is moved below.
+ let trusted_tx = initial_commitment_tx.trust();
+ let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
+ let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);
+ // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
+ if let Err(_) = self.secp_ctx.verify(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
+ return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned()));
+ }
}
- let counterparty_pubkeys = self.counterparty_pubkeys.as_ref().unwrap();
+ // As in funding_created: pair our initial commitment with their signature; no HTLC
+ // signatures exist yet at the funding stage.
+ let holder_commitment_tx = HolderCommitmentTransaction::new(
+ initial_commitment_tx,
+ msg.signature,
+ Vec::new(),
+ &self.get_holder_pubkeys().funding_pubkey,
+ self.counterparty_funding_pubkey()
+ );
+
+
let funding_redeemscript = self.get_funding_redeemscript();
- let funding_txo = self.funding_txo.as_ref().unwrap();
+ let funding_txo = self.get_funding_txo().unwrap();
let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
- macro_rules! create_monitor {
- () => { {
- let commitment_tx = HolderCommitmentTransaction::new_missing_holder_sig(initial_commitment_tx.clone(), msg.signature.clone(), &self.holder_keys.pubkeys().funding_pubkey, counterparty_funding_pubkey, holder_keys.clone(), self.feerate_per_kw, Vec::new());
- let mut channel_monitor = ChannelMonitor::new(self.holder_keys.clone(),
- &self.shutdown_pubkey, self.holder_selected_contest_delay,
- &self.destination_script, (funding_txo.clone(), funding_txo_script.clone()),
- &counterparty_pubkeys.htlc_basepoint, &counterparty_pubkeys.delayed_payment_basepoint,
- self.counterparty_selected_contest_delay, funding_redeemscript.clone(), self.channel_value_satoshis,
- self.get_commitment_transaction_number_obscure_factor(),
- commitment_tx);
-
- channel_monitor.provide_latest_counterparty_commitment_tx_info(&counterparty_initial_commitment_tx, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);
-
- channel_monitor
- } }
- }
+ let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound());
+ let channel_monitor = ChannelMonitor::new(self.secp_ctx.clone(), self.holder_signer.clone(),
+ &self.shutdown_pubkey, self.get_holder_selected_contest_delay(),
+ &self.destination_script, (funding_txo, funding_txo_script),
+ &self.channel_transaction_parameters,
+ funding_redeemscript.clone(), self.channel_value_satoshis,
+ obscure_factor,
+ holder_commitment_tx);
- let channel_monitor = create_monitor!();
+ // NOTE(review): non-mut binding + method call, same as in funding_created — confirm
+ // provide_latest_counterparty_commitment_tx takes &self.
+ channel_monitor.provide_latest_counterparty_commitment_tx(counterparty_initial_bitcoin_tx.txid, Vec::new(), self.cur_counterparty_commitment_transaction_number, self.counterparty_cur_commitment_point.unwrap(), logger);
assert_eq!(self.channel_state & (ChannelState::MonitorUpdateFailed as u32), 0); // We have no had any monitor(s) yet to fail update!
self.channel_state = ChannelState::FundingSent as u32;
+ // Fee (in msat) for a commitment tx carrying `num_htlcs` non-dust HTLC outputs:
+ // weight * feerate_per_kw / 1000 gives sats, * 1000 scales back to msat (i.e. the
+ // result is truncated to a whole-satoshi fee).
(COMMITMENT_TX_BASE_WEIGHT + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * self.feerate_per_kw as u64 / 1000 * 1000
}
- // Get the commitment tx fee for the local (i.e our) next commitment transaction
- // based on the number of pending HTLCs that are on track to be in our next
- // commitment tx. `addl_htcs` is an optional parameter allowing the caller
- // to add a number of additional HTLCs to the calculation. Note that dust
- // HTLCs are excluded.
- fn next_local_commit_tx_fee_msat(&self, addl_htlcs: usize) -> u64 {
- assert!(self.channel_outbound);
+ // Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
+ // number of pending HTLCs that are on track to be in our next commitment tx, plus an additional
+ // HTLC if `fee_spike_buffer_htlc` is Some, plus the new candidate HTLC described by `htlc`.
+ // Dust HTLCs are excluded.
+ fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
+ // Only the funder pays the commitment fee, so this is only meaningful outbound.
+ assert!(self.is_outbound());
+
+ // "Real" dust limits fold in the weight-based fee of the second-stage HTLC tx:
+ // an HTLC below this value is not worth claiming on-chain and gets no output.
+ let real_dust_limit_success_sat = (self.feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis;
+ let real_dust_limit_timeout_sat = (self.feerate_per_kw as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.holder_dust_limit_satoshis;
+
+ // Count the candidate HTLC (if non-dust) and the optional fee-spike-buffer HTLC.
+ // Locally-offered HTLCs are claimed via timeout txs, remote-offered via success txs,
+ // hence the differing dust thresholds per origin.
+ let mut addl_htlcs = 0;
+ if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
+ match htlc.origin {
+ HTLCInitiator::LocalOffered => {
+ if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
+ addl_htlcs += 1;
+ }
+ },
+ HTLCInitiator::RemoteOffered => {
+ if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
+ addl_htlcs += 1;
+ }
+ }
+ }
+
+ let mut included_htlcs = 0;
+ for ref htlc in self.pending_inbound_htlcs.iter() {
+ if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
+ continue
+ }
+ // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
+ // transaction including this HTLC if it times out before they RAA.
+ included_htlcs += 1;
+ }
- let mut their_acked_htlcs = self.pending_inbound_htlcs.len();
for ref htlc in self.pending_outbound_htlcs.iter() {
- if htlc.amount_msat / 1000 <= self.holder_dust_limit_satoshis {
+ if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
continue
}
match htlc.state {
- OutboundHTLCState::Committed => their_acked_htlcs += 1,
- OutboundHTLCState::RemoteRemoved {..} => their_acked_htlcs += 1,
- OutboundHTLCState::LocalAnnounced {..} => their_acked_htlcs += 1,
+ OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
+ OutboundHTLCState::Committed => included_htlcs += 1,
+ OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
+ // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
+ // transaction won't be generated until they send us their next RAA, which will mean
+ // dropping any HTLCs in this state.
_ => {},
}
}
for htlc in self.holding_cell_htlc_updates.iter() {
match htlc {
- &HTLCUpdateAwaitingACK::AddHTLC { .. } => their_acked_htlcs += 1,
- _ => {},
+ &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
+ if amount_msat / 1000 < real_dust_limit_timeout_sat {
+ continue
+ }
+ included_htlcs += 1
+ },
+ _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
+ // ack we're guaranteed to never include them in commitment txs anymore.
}
}
- self.commit_tx_fee_msat(their_acked_htlcs + addl_htlcs)
+ let num_htlcs = included_htlcs + addl_htlcs;
+ let res = self.commit_tx_fee_msat(num_htlcs);
+ // Test/fuzz-only: cache the inputs of this computation so later assertions can check
+ // consistency with the commitment tx actually built. The cached fee excludes the
+ // spike-buffer HTLC (it is a safety margin, not a real output).
+ #[cfg(any(test, feature = "fuzztarget"))]
+ {
+ let mut fee = res;
+ if fee_spike_buffer_htlc.is_some() {
+ fee = self.commit_tx_fee_msat(num_htlcs - 1);
+ }
+ let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len()
+ + self.holding_cell_htlc_updates.len();
+ let commitment_tx_info = CommitmentTxInfoCached {
+ fee,
+ total_pending_htlcs,
+ next_holder_htlc_id: match htlc.origin {
+ HTLCInitiator::LocalOffered => self.next_holder_htlc_id + 1,
+ HTLCInitiator::RemoteOffered => self.next_holder_htlc_id,
+ },
+ next_counterparty_htlc_id: match htlc.origin {
+ HTLCInitiator::LocalOffered => self.next_counterparty_htlc_id,
+ HTLCInitiator::RemoteOffered => self.next_counterparty_htlc_id + 1,
+ },
+ feerate: self.feerate_per_kw,
+ };
+ *self.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
+ }
+ res
}
- // Get the commitment tx fee for the remote's next commitment transaction
- // based on the number of pending HTLCs that are on track to be in their
- // next commitment tx. `addl_htcs` is an optional parameter allowing the caller
- // to add a number of additional HTLCs to the calculation. Note that dust HTLCs
- // are excluded.
- fn next_remote_commit_tx_fee_msat(&self, addl_htlcs: usize) -> u64 {
- assert!(!self.channel_outbound);
+ // Get the commitment tx fee for the remote's next commitment transaction based on the number of
+ // pending HTLCs that are on track to be in their next commitment tx, plus an additional HTLC if
+ // `fee_spike_buffer_htlc` is Some, plus the new candidate HTLC described by `htlc`. Dust HTLCs
+ // are excluded.
+ fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
+ // Only meaningful when the remote is the funder (we are inbound).
+ assert!(!self.is_outbound());
+
+ // From the remote's perspective, so their dust limit applies; success/timeout roles
+ // are mirrored relative to next_local_commit_tx_fee_msat.
+ let real_dust_limit_success_sat = (self.feerate_per_kw as u64 * HTLC_SUCCESS_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis;
+ let real_dust_limit_timeout_sat = (self.feerate_per_kw as u64 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + self.counterparty_dust_limit_satoshis;
+
+ let mut addl_htlcs = 0;
+ if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
+ match htlc.origin {
+ HTLCInitiator::LocalOffered => {
+ if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
+ addl_htlcs += 1;
+ }
+ },
+ HTLCInitiator::RemoteOffered => {
+ if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
+ addl_htlcs += 1;
+ }
+ }
+ }
+
+ // When calculating the set of HTLCs which will be included in their next commitment_signed, all
+ // non-dust inbound HTLCs are included (as all states imply it will be included) and only
+ // committed outbound HTLCs, see below.
+ let mut included_htlcs = 0;
+ for ref htlc in self.pending_inbound_htlcs.iter() {
+ // NOTE(review): `<=` here (and below) vs strict `<` in next_local_commit_tx_fee_msat —
+ // off-by-one for an HTLC exactly at the dust threshold; confirm which is intended.
+ if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
+ continue
+ }
+ included_htlcs += 1;
+ }
- // When calculating the set of HTLCs which will be included in their next
- // commitment_signed, all inbound HTLCs are included (as all states imply it will be
- // included) and only committed outbound HTLCs, see below.
- let mut their_acked_htlcs = self.pending_inbound_htlcs.len();
for ref htlc in self.pending_outbound_htlcs.iter() {
- if htlc.amount_msat / 1000 <= self.counterparty_dust_limit_satoshis {
+ if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
continue
}
- // We only include outbound HTLCs if it will not be included in their next
- // commitment_signed, i.e. if they've responded to us with an RAA after announcement.
+ // We only include outbound HTLCs if it will not be included in their next commitment_signed,
+ // i.e. if they've responded to us with an RAA after announcement.
match htlc.state {
- OutboundHTLCState::Committed => their_acked_htlcs += 1,
- OutboundHTLCState::RemoteRemoved {..} => their_acked_htlcs += 1,
+ OutboundHTLCState::Committed => included_htlcs += 1,
+ OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
+ OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
_ => {},
}
}
- self.commit_tx_fee_msat(their_acked_htlcs + addl_htlcs)
+ let num_htlcs = included_htlcs + addl_htlcs;
+ let res = self.commit_tx_fee_msat(num_htlcs);
+ // Test/fuzz-only consistency cache, mirroring next_local_commit_tx_fee_msat (note the
+ // holding cell is not counted here, matching total_pending_htlcs above).
+ #[cfg(any(test, feature = "fuzztarget"))]
+ {
+ let mut fee = res;
+ if fee_spike_buffer_htlc.is_some() {
+ fee = self.commit_tx_fee_msat(num_htlcs - 1);
+ }
+ let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
+ let commitment_tx_info = CommitmentTxInfoCached {
+ fee,
+ total_pending_htlcs,
+ next_holder_htlc_id: match htlc.origin {
+ HTLCInitiator::LocalOffered => self.next_holder_htlc_id + 1,
+ HTLCInitiator::RemoteOffered => self.next_holder_htlc_id,
+ },
+ next_counterparty_htlc_id: match htlc.origin {
+ HTLCInitiator::LocalOffered => self.next_counterparty_htlc_id,
+ HTLCInitiator::RemoteOffered => self.next_counterparty_htlc_id + 1,
+ },
+ feerate: self.feerate_per_kw,
+ };
+ *self.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
+ }
+ res
}
pub fn update_add_htlc<F, L: Deref>(&mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus, create_pending_htlc_status: F, logger: &L) -> Result<(), ChannelError>
if inbound_htlc_count + 1 > OUR_MAX_HTLCS as u32 {
return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", OUR_MAX_HTLCS)));
}
- let holder_max_htlc_value_in_flight_msat = Channel::<ChanSigner>::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis);
+ let holder_max_htlc_value_in_flight_msat = Channel::<Signer>::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis);
if htlc_inbound_value_msat + msg.amount_msat > holder_max_htlc_value_in_flight_msat {
return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", holder_max_htlc_value_in_flight_msat)));
}
// Check that the remote can afford to pay for this HTLC on-chain at the current
// feerate_per_kw, while maintaining their channel reserve (as required by the spec).
- let remote_commit_tx_fee_msat = if self.channel_outbound { 0 } else {
- // +1 for this HTLC.
- self.next_remote_commit_tx_fee_msat(1)
+ // The raw "+1 HTLC" count is replaced by an HTLCCandidate carrying the amount, so the
+ // fee helper can itself decide whether the new HTLC is dust.
+ let remote_commit_tx_fee_msat = if self.is_outbound() { 0 } else {
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+ self.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
};
+ // NOTE(review): u64 subtractions below rely on earlier checks (outside this view)
+ // guaranteeing pending_remote_value_msat >= msg.amount_msat — confirm, else underflow.
if pending_remote_value_msat - msg.amount_msat < remote_commit_tx_fee_msat {
return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
};
let chan_reserve_msat =
- Channel::<ChanSigner>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) * 1000;
+ Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis) * 1000;
if pending_remote_value_msat - msg.amount_msat - remote_commit_tx_fee_msat < chan_reserve_msat {
return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
}
- if !self.channel_outbound {
- // `+1` for this HTLC, `2 *` and `+1` fee spike buffer we keep for the remote. This deviates from the
- // spec because in the spec, the fee spike buffer requirement doesn't exist on the receiver's side,
- // only on the sender's.
- // Note that when we eventually remove support for fee updates and switch to anchor output fees,
- // we will drop the `2 *`, since we no longer be as sensitive to fee spikes. But, keep the extra +1
- // as we should still be able to afford adding this HTLC plus one more future HTLC, regardless of
- // being sensitive to fee spikes.
- let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.next_remote_commit_tx_fee_msat(1 + 1);
+ if !self.is_outbound() {
+ // `2 *` and `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
+ // the spec because in the spec, the fee spike buffer requirement doesn't exist on the
+ // receiver's side, only on the sender's.
+ // Note that when we eventually remove support for fee updates and switch to anchor output
+ // fees, we will drop the `2 *`, since we no longer be as sensitive to fee spikes. But, keep
+ // the extra htlc when calculating the next remote commitment transaction fee as we should
+ // still be able to afford adding this HTLC plus one more future HTLC, regardless of being
+ // sensitive to fee spikes.
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+ let remote_fee_cost_incl_stuck_buffer_msat = 2 * self.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
if pending_remote_value_msat - msg.amount_msat - chan_reserve_msat < remote_fee_cost_incl_stuck_buffer_msat {
// Note that if the pending_forward_status is not updated here, then it's because we're already failing
// the HTLC, i.e. its status is already set to failing.
}
} else {
// Check that they won't violate our local required channel reserve by adding this HTLC.
-
- // +1 for this HTLC.
- let local_commit_tx_fee_msat = self.next_local_commit_tx_fee_msat(1)
+ let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
+ let local_commit_tx_fee_msat = self.next_local_commit_tx_fee_msat(htlc_candidate, None);
if self.value_to_self_msat < self.counterparty_selected_channel_reserve_satoshis * 1000 + local_commit_tx_fee_msat {
return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
}
let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number).map_err(|e| (None, e))?;
let mut update_fee = false;
- let feerate_per_kw = if !self.channel_outbound && self.pending_update_fee.is_some() {
+ let feerate_per_kw = if !self.is_outbound() && self.pending_update_fee.is_some() {
update_fee = true;
self.pending_update_fee.unwrap()
} else {
self.feerate_per_kw
};
- let mut commitment_tx = {
- let mut commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, feerate_per_kw, logger);
- let htlcs_cloned: Vec<_> = commitment_tx.2.drain(..).map(|htlc| (htlc.0, htlc.1.map(|h| h.clone()))).collect();
- (commitment_tx.0, commitment_tx.1, htlcs_cloned)
+ let (num_htlcs, mut htlcs_cloned, commitment_tx, commitment_txid) = {
+ let commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, feerate_per_kw, logger);
+ let commitment_txid = {
+ let trusted_tx = commitment_tx.0.trust();
+ let bitcoin_tx = trusted_tx.built_transaction();
+ let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.channel_value_satoshis);
+
+ log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(msg.signature.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script));
+ if let Err(_) = self.secp_ctx.verify(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
+ return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned())));
+ }
+ bitcoin_tx.txid
+ };
+ let htlcs_cloned: Vec<_> = commitment_tx.2.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
+ (commitment_tx.1, htlcs_cloned, commitment_tx.0, commitment_txid)
};
- let commitment_txid = commitment_tx.0.txid();
- let sighash = hash_to_message!(&bip143::SigHashCache::new(&commitment_tx.0).signature_hash(0, &funding_script, self.channel_value_satoshis, SigHashType::All)[..]);
- log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {}", log_bytes!(msg.signature.serialize_compact()[..]), log_bytes!(self.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&commitment_tx.0), log_bytes!(sighash[..]), encode::serialize_hex(&funding_script));
- if let Err(_) = self.secp_ctx.verify(&sighash, &msg.signature, &self.counterparty_funding_pubkey()) {
- return Err((None, ChannelError::Close("Invalid commitment tx signature from peer".to_owned())));
- }
+ let total_fee = feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
//If channel fee was updated by funder confirm funder can afford the new fee rate when applied to the current local commitment transaction
if update_fee {
- let num_htlcs = commitment_tx.1;
- let total_fee = feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000;
-
- let counterparty_reserve_we_require = Channel::<ChanSigner>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis);
+ let counterparty_reserve_we_require = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis);
if self.channel_value_satoshis - self.value_to_self_msat / 1000 < total_fee + counterparty_reserve_we_require {
return Err((None, ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned())));
}
}
+ #[cfg(any(test, feature = "fuzztarget"))]
+ {
+ if self.is_outbound() {
+ let projected_commit_tx_info = self.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
+ *self.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ if let Some(info) = projected_commit_tx_info {
+ let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len()
+ + self.holding_cell_htlc_updates.len();
+ if info.total_pending_htlcs == total_pending_htlcs
+ && info.next_holder_htlc_id == self.next_holder_htlc_id
+ && info.next_counterparty_htlc_id == self.next_counterparty_htlc_id
+ && info.feerate == self.feerate_per_kw {
+ assert_eq!(total_fee, info.fee / 1000);
+ }
+ }
+ }
+ }
- if msg.htlc_signatures.len() != commitment_tx.1 {
- return Err((None, ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_tx.1))));
+ if msg.htlc_signatures.len() != num_htlcs {
+ return Err((None, ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), num_htlcs))));
}
- // TODO: Merge these two, sadly they are currently both required to be passed separately to
- // ChannelMonitor:
- let mut htlcs_without_source = Vec::with_capacity(commitment_tx.2.len());
- let mut htlcs_and_sigs = Vec::with_capacity(commitment_tx.2.len());
- for (idx, (htlc, source)) in commitment_tx.2.drain(..).enumerate() {
+ // TODO: Sadly, we pass HTLCs twice to ChannelMonitor: once via the HolderCommitmentTransaction and once via the ChannelMonitorUpdate's htlc_outputs
+ let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
+ for (idx, (htlc, source)) in htlcs_cloned.drain(..).enumerate() {
if let Some(_) = htlc.transaction_output_index {
let htlc_tx = self.build_htlc_transaction(&commitment_txid, &htlc, true, &keys, feerate_per_kw);
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &keys);
if let Err(_) = self.secp_ctx.verify(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key) {
return Err((None, ChannelError::Close("Invalid HTLC tx signature from peer".to_owned())));
}
- htlcs_without_source.push((htlc.clone(), Some(msg.htlc_signatures[idx])));
htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source));
} else {
- htlcs_without_source.push((htlc.clone(), None));
htlcs_and_sigs.push((htlc, None, source));
}
}
- let next_per_commitment_point = self.holder_keys.get_per_commitment_point(self.cur_holder_commitment_transaction_number - 1, &self.secp_ctx);
- let per_commitment_secret = self.holder_keys.release_commitment_secret(self.cur_holder_commitment_transaction_number + 1);
+ let holder_commitment_tx = HolderCommitmentTransaction::new(
+ commitment_tx,
+ msg.signature,
+ msg.htlc_signatures.clone(),
+ &self.get_holder_pubkeys().funding_pubkey,
+ self.counterparty_funding_pubkey()
+ );
+
+ let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number - 1, &self.secp_ctx);
+ let per_commitment_secret = self.holder_signer.release_commitment_secret(self.cur_holder_commitment_transaction_number + 1);
// Update state now that we've passed all the can-fail calls...
let mut need_commitment = false;
- if !self.channel_outbound {
+ if !self.is_outbound() {
if let Some(fee_update) = self.pending_update_fee {
self.feerate_per_kw = fee_update;
// We later use the presence of pending_update_fee to indicate we should generate a
}
}
- let counterparty_funding_pubkey = self.counterparty_pubkeys.as_ref().unwrap().funding_pubkey;
-
self.latest_monitor_update_id += 1;
let mut monitor_update = ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
- commitment_tx: HolderCommitmentTransaction::new_missing_holder_sig(commitment_tx.0, msg.signature.clone(), &self.holder_keys.pubkeys().funding_pubkey, &counterparty_funding_pubkey, keys, self.feerate_per_kw, htlcs_without_source),
+ commitment_tx: holder_commitment_tx,
htlc_outputs: htlcs_and_sigs
}]
};
return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
}
+ #[cfg(any(test, feature = "fuzztarget"))]
+ {
+ *self.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ *self.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ }
+
self.commitment_secrets.provide_secret(self.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
.map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
self.latest_monitor_update_id += 1;
}
self.value_to_self_msat = (self.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
- if self.channel_outbound {
+ if self.is_outbound() {
if let Some(feerate) = self.pending_update_fee.take() {
self.feerate_per_kw = feerate;
}
/// further details on the optionness of the return value.
/// You MUST call send_commitment prior to any other calls on this Channel
fn send_update_fee(&mut self, feerate_per_kw: u32) -> Option<msgs::UpdateFee> {
- if !self.channel_outbound {
+ if !self.is_outbound() {
panic!("Cannot send fee from inbound channel");
}
if !self.is_usable() {
assert_eq!(self.channel_state & ChannelState::MonitorUpdateFailed as u32, ChannelState::MonitorUpdateFailed as u32);
self.channel_state &= !(ChannelState::MonitorUpdateFailed as u32);
- let needs_broadcast_safe = self.channel_state & (ChannelState::FundingSent as u32) != 0 && self.channel_outbound;
+ let needs_broadcast_safe = self.channel_state & (ChannelState::FundingSent as u32) != 0 && self.is_outbound();
// Because we will never generate a FundingBroadcastSafe event when we're in
// MonitorUpdateFailed, if we assume the user only broadcast the funding transaction when
// monitor on funding_created, and we even got the funding transaction confirmed before the
// monitor was persisted.
let funding_locked = if self.monitor_pending_funding_locked {
- assert!(!self.channel_outbound, "Funding transaction broadcast without FundingBroadcastSafe!");
+ assert!(!self.is_outbound(), "Funding transaction broadcast without FundingBroadcastSafe!");
self.monitor_pending_funding_locked = false;
- let next_per_commitment_point = self.holder_keys.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
+ let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
Some(msgs::FundingLocked {
channel_id: self.channel_id(),
next_per_commitment_point,
pub fn update_fee<F: Deref>(&mut self, fee_estimator: &F, msg: &msgs::UpdateFee) -> Result<(), ChannelError>
where F::Target: FeeEstimator
{
- if self.channel_outbound {
+ if self.is_outbound() {
return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
}
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
}
- Channel::<ChanSigner>::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
+ Channel::<Signer>::check_remote_fee(fee_estimator, msg.feerate_per_kw)?;
self.pending_update_fee = Some(msg.feerate_per_kw);
self.update_time_counter += 1;
Ok(())
}
fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
- let next_per_commitment_point = self.holder_keys.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
- let per_commitment_secret = self.holder_keys.release_commitment_secret(self.cur_holder_commitment_transaction_number + 2);
+ let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
+ let per_commitment_secret = self.holder_signer.release_commitment_secret(self.cur_holder_commitment_transaction_number + 2);
msgs::RevokeAndACK {
channel_id: self.channel_id,
per_commitment_secret,
if msg.next_remote_commitment_number > 0 {
match msg.data_loss_protect {
OptionalField::Present(ref data_loss) => {
- let expected_point = self.holder_keys.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx);
+ let expected_point = self.holder_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.secp_ctx);
let given_secret = SecretKey::from_slice(&data_loss.your_last_per_commitment_secret)
.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
if expected_point != PublicKey::from_secret_key(&self.secp_ctx, &given_secret) {
}
// We have OurFundingLocked set!
- let next_per_commitment_point = self.holder_keys.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
+ let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
return Ok((Some(msgs::FundingLocked {
channel_id: self.channel_id(),
next_per_commitment_point,
let resend_funding_locked = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.cur_holder_commitment_transaction_number == 1 {
// We should never have to worry about MonitorUpdateFailed resending FundingLocked
- let next_per_commitment_point = self.holder_keys.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
+ let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
Some(msgs::FundingLocked {
channel_id: self.channel_id(),
next_per_commitment_point,
fn maybe_propose_first_closing_signed<F: Deref>(&mut self, fee_estimator: &F) -> Option<msgs::ClosingSigned>
where F::Target: FeeEstimator
{
- if !self.channel_outbound || !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() ||
+ if !self.is_outbound() || !self.pending_inbound_htlcs.is_empty() || !self.pending_outbound_htlcs.is_empty() ||
self.channel_state & (BOTH_SIDES_SHUTDOWN_MASK | ChannelState::AwaitingRemoteRevoke as u32) != BOTH_SIDES_SHUTDOWN_MASK ||
self.last_sent_closing_fee.is_some() || self.pending_update_fee.is_some() {
return None;
let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(proposed_total_fee_satoshis, false);
- let sig = self.holder_keys
+ let sig = self.holder_signer
.sign_closing_transaction(&closing_tx, &self.secp_ctx)
.ok();
assert!(closing_tx.get_weight() as u64 <= tx_weight);
})
}
- pub fn shutdown<F: Deref>(&mut self, fee_estimator: &F, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+ pub fn shutdown<F: Deref>(&mut self, fee_estimator: &F, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(Option<msgs::Shutdown>, Option<msgs::ClosingSigned>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
where F::Target: FeeEstimator
{
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
}
assert_eq!(self.channel_state & ChannelState::ShutdownComplete as u32, 0);
- // BOLT 2 says we must only send a scriptpubkey of certain standard forms, which are up to
- // 34 bytes in length, so don't let the remote peer feed us some super fee-heavy script.
- if self.channel_outbound && msg.scriptpubkey.len() > 34 {
- return Err(ChannelError::Close(format!("Got counterparty shutdown_scriptpubkey ({}) of absurd length from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
- }
-
- //Check counterparty_shutdown_scriptpubkey form as BOLT says we must
- if !msg.scriptpubkey.is_p2pkh() && !msg.scriptpubkey.is_p2sh() && !msg.scriptpubkey.is_v0_p2wpkh() && !msg.scriptpubkey.is_v0_p2wsh() {
+ if is_unsupported_shutdown_script(&their_features, &msg.scriptpubkey) {
return Err(ChannelError::Close(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_bytes().to_hex())));
}
tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
- let funding_key = self.holder_keys.pubkeys().funding_pubkey.serialize();
+ let funding_key = self.get_holder_pubkeys().funding_pubkey.serialize();
let counterparty_funding_key = self.counterparty_funding_pubkey().serialize();
if funding_key[..] < counterparty_funding_key[..] {
tx.input[0].witness.push(sig.serialize_der().to_vec());
}
let mut sighash = hash_to_message!(&bip143::SigHashCache::new(&closing_tx).signature_hash(0, &funding_redeemscript, self.channel_value_satoshis, SigHashType::All)[..]);
- let counterparty_funding_pubkey = &self.counterparty_pubkeys.as_ref().unwrap().funding_pubkey;
-
- match self.secp_ctx.verify(&sighash, &msg.signature, counterparty_funding_pubkey) {
+ match self.secp_ctx.verify(&sighash, &msg.signature, &self.get_counterparty_pubkeys().funding_pubkey) {
Ok(_) => {},
Err(_e) => {
// The remote end may have decided to revoke their output due to inconsistent dust
($new_feerate: expr) => {
let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
let (closing_tx, used_total_fee) = self.build_closing_transaction($new_feerate as u64 * tx_weight / 1000, false);
- let sig = self.holder_keys
+ let sig = self.holder_signer
.sign_closing_transaction(&closing_tx, &self.secp_ctx)
.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
assert!(closing_tx.get_weight() as u64 <= tx_weight);
}
let mut min_feerate = 253;
- if self.channel_outbound {
+ if self.is_outbound() {
let max_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
if (msg.fee_satoshis as u64) > max_feerate as u64 * closing_tx_max_weight / 1000 {
if let Some((last_feerate, _, _)) = self.last_sent_closing_fee {
propose_new_feerate!(min_feerate);
}
- let sig = self.holder_keys
+ let sig = self.holder_signer
.sign_closing_transaction(&closing_tx, &self.secp_ctx)
.map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
/// Returns the funding_txo we either got from our peer, or were given by
/// get_outbound_funding_created.
pub fn get_funding_txo(&self) -> Option<OutPoint> {
- self.funding_txo
+ self.channel_transaction_parameters.funding_outpoint
+ }
+
+ fn get_holder_selected_contest_delay(&self) -> u16 {
+ self.channel_transaction_parameters.holder_selected_contest_delay
+ }
+
+ fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
+ &self.channel_transaction_parameters.holder_pubkeys
+ }
+
+ fn get_counterparty_selected_contest_delay(&self) -> u16 {
+ self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().selected_contest_delay
+ }
+
+ fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
+ &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
}
/// Allowed in any state (including after shutdown)
// Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
// to use full capacity. This is an effort to reduce routing failures, because in many cases
// channel might have been used to route very small values (either by honest users or as DoS).
- self.channel_value_satoshis * 9 / 10,
+ self.channel_value_satoshis * 1000 * 9 / 10,
- Channel::<ChanSigner>::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis)
+ Channel::<Signer>::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis)
);
}
}
#[cfg(test)]
- pub fn get_keys(&self) -> &ChanSigner {
- &self.holder_keys
+ pub fn get_signer(&self) -> &Signer {
+ &self.holder_signer
}
#[cfg(test)]
}
pub fn is_outbound(&self) -> bool {
- self.channel_outbound
+ self.channel_transaction_parameters.is_outbound_from_holder
}
/// Gets the fee we'd want to charge for adding an HTLC output to this Channel
// the fee cost of the HTLC-Success/HTLC-Timeout transaction:
let mut res = self.feerate_per_kw as u64 * cmp::max(HTLC_TIMEOUT_TX_WEIGHT, HTLC_SUCCESS_TX_WEIGHT) / 1000;
- if self.channel_outbound {
+ if self.is_outbound() {
// + the marginal fee increase cost to us in the commitment transaction:
res += self.feerate_per_kw as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC / 1000;
}
}
if non_shutdown_state & !(ChannelState::TheirFundingLocked as u32) == ChannelState::FundingSent as u32 {
for &(index_in_block, tx) in txdata.iter() {
- if tx.txid() == self.funding_txo.unwrap().txid {
- let txo_idx = self.funding_txo.unwrap().index as usize;
+ let funding_txo = self.get_funding_txo().unwrap();
+ if tx.txid() == funding_txo.txid {
+ let txo_idx = funding_txo.index as usize;
if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.get_funding_redeemscript().to_v0_p2wsh() ||
tx.output[txo_idx].value != self.channel_value_satoshis {
- if self.channel_outbound {
+ if self.is_outbound() {
// If we generated the funding transaction and it doesn't match what it
// should, the client is really broken and we should just panic and
// tell them off. That said, because hash collisions happen with high
data: "funding tx had wrong script/value".to_owned()
});
} else {
- if self.channel_outbound {
+ if self.is_outbound() {
for input in tx.input.iter() {
if input.witness.is_empty() {
// We generated a malleable funding transaction, implying we've
//a protocol oversight, but I assume I'm just missing something.
if need_commitment_update {
if self.channel_state & (ChannelState::MonitorUpdateFailed as u32) == 0 {
- let next_per_commitment_point = self.holder_keys.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
+ let next_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
return Ok((Some(msgs::FundingLocked {
channel_id: self.channel_id,
next_per_commitment_point,
// something in the handler for the message that prompted this message):
pub fn get_open_channel(&self, chain_hash: BlockHash) -> msgs::OpenChannel {
- if !self.channel_outbound {
+ if !self.is_outbound() {
panic!("Tried to open a channel for an inbound channel?");
}
if self.channel_state != ChannelState::OurInitSent as u32 {
panic!("Tried to send an open_channel for a channel that has already advanced");
}
- let first_per_commitment_point = self.holder_keys.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
- let keys = self.holder_keys.pubkeys();
+ let first_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
+ let keys = self.get_holder_pubkeys();
msgs::OpenChannel {
chain_hash,
funding_satoshis: self.channel_value_satoshis,
push_msat: self.channel_value_satoshis * 1000 - self.value_to_self_msat,
dust_limit_satoshis: self.holder_dust_limit_satoshis,
- max_htlc_value_in_flight_msat: Channel::<ChanSigner>::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis),
- channel_reserve_satoshis: Channel::<ChanSigner>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis),
+ max_htlc_value_in_flight_msat: Channel::<Signer>::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis),
+ channel_reserve_satoshis: Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis),
htlc_minimum_msat: self.holder_htlc_minimum_msat,
feerate_per_kw: self.feerate_per_kw as u32,
- to_self_delay: self.holder_selected_contest_delay,
+ to_self_delay: self.get_holder_selected_contest_delay(),
max_accepted_htlcs: OUR_MAX_HTLCS,
funding_pubkey: keys.funding_pubkey,
revocation_basepoint: keys.revocation_basepoint,
}
pub fn get_accept_channel(&self) -> msgs::AcceptChannel {
- if self.channel_outbound {
+ if self.is_outbound() {
panic!("Tried to send accept_channel for an outbound channel?");
}
if self.channel_state != (ChannelState::OurInitSent as u32) | (ChannelState::TheirInitSent as u32) {
panic!("Tried to send an accept_channel for a channel that has already advanced");
}
- let first_per_commitment_point = self.holder_keys.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
- let keys = self.holder_keys.pubkeys();
+ let first_per_commitment_point = self.holder_signer.get_per_commitment_point(self.cur_holder_commitment_transaction_number, &self.secp_ctx);
+ let keys = self.get_holder_pubkeys();
msgs::AcceptChannel {
temporary_channel_id: self.channel_id,
dust_limit_satoshis: self.holder_dust_limit_satoshis,
- max_htlc_value_in_flight_msat: Channel::<ChanSigner>::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis),
- channel_reserve_satoshis: Channel::<ChanSigner>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis),
+ max_htlc_value_in_flight_msat: Channel::<Signer>::get_holder_max_htlc_value_in_flight_msat(self.channel_value_satoshis),
+ channel_reserve_satoshis: Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis),
htlc_minimum_msat: self.holder_htlc_minimum_msat,
minimum_depth: self.minimum_depth,
- to_self_delay: self.holder_selected_contest_delay,
+ to_self_delay: self.get_holder_selected_contest_delay(),
max_accepted_htlcs: OUR_MAX_HTLCS,
funding_pubkey: keys.funding_pubkey,
revocation_basepoint: keys.revocation_basepoint,
fn get_outbound_funding_created_signature<L: Deref>(&mut self, logger: &L) -> Result<Signature, ChannelError> where L::Target: Logger {
let counterparty_keys = self.build_remote_transaction_keys()?;
let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, self.feerate_per_kw, logger).0;
- let pre_remote_keys = PreCalculatedTxCreationKeys::new(counterparty_keys);
- Ok(self.holder_keys.sign_counterparty_commitment(self.feerate_per_kw, &counterparty_initial_commitment_tx, &pre_remote_keys, &Vec::new(), &self.secp_ctx)
+ Ok(self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, &self.secp_ctx)
.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0)
}
/// Do NOT broadcast the funding transaction until after a successful funding_signed call!
/// If an Err is returned, it is a ChannelError::Close.
pub fn get_outbound_funding_created<L: Deref>(&mut self, funding_txo: OutPoint, logger: &L) -> Result<msgs::FundingCreated, ChannelError> where L::Target: Logger {
- if !self.channel_outbound {
+ if !self.is_outbound() {
panic!("Tried to create outbound funding_created message on an inbound channel!");
}
if self.channel_state != (ChannelState::OurInitSent as u32 | ChannelState::TheirInitSent as u32) {
panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
}
- self.funding_txo = Some(funding_txo.clone());
+ self.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
+ self.holder_signer.ready_channel(&self.channel_transaction_parameters);
+
let signature = match self.get_outbound_funding_created_signature(logger) {
Ok(res) => res,
Err(e) => {
log_error!(logger, "Got bad signatures: {:?}!", e);
- self.funding_txo = None;
+ self.channel_transaction_parameters.funding_outpoint = None;
return Err(e);
}
};
short_channel_id: self.get_short_channel_id().unwrap(),
node_id_1: if were_node_one { node_id } else { self.get_counterparty_node_id() },
node_id_2: if were_node_one { self.get_counterparty_node_id() } else { node_id },
- bitcoin_key_1: if were_node_one { self.holder_keys.pubkeys().funding_pubkey } else { self.counterparty_funding_pubkey().clone() },
- bitcoin_key_2: if were_node_one { self.counterparty_funding_pubkey().clone() } else { self.holder_keys.pubkeys().funding_pubkey },
+ bitcoin_key_1: if were_node_one { self.get_holder_pubkeys().funding_pubkey } else { self.counterparty_funding_pubkey().clone() },
+ bitcoin_key_2: if were_node_one { self.counterparty_funding_pubkey().clone() } else { self.get_holder_pubkeys().funding_pubkey },
excess_data: Vec::new(),
};
- let sig = self.holder_keys.sign_channel_announcement(&msg, &self.secp_ctx)
+ let sig = self.holder_signer.sign_channel_announcement(&msg, &self.secp_ctx)
.map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
Ok((msg, sig))
return Err(ChannelError::Ignore(format!("Cannot send value that would put us over the max HTLC value in flight our peer will accept ({})", self.counterparty_max_htlc_value_in_flight_msat)));
}
- if !self.channel_outbound {
+ if !self.is_outbound() {
// Check that we won't violate the remote channel reserve by adding this HTLC.
-
let counterparty_balance_msat = self.channel_value_satoshis * 1000 - self.value_to_self_msat;
- let holder_selected_chan_reserve_msat = Channel::<ChanSigner>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis);
- // 1 additional HTLC corresponding to this HTLC.
- let counterparty_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(1);
+ let holder_selected_chan_reserve_msat = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis);
+ let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
+ let counterparty_commit_tx_fee_msat = self.next_remote_commit_tx_fee_msat(htlc_candidate, None);
if counterparty_balance_msat < holder_selected_chan_reserve_msat + counterparty_commit_tx_fee_msat {
return Err(ChannelError::Ignore("Cannot send value that would put counterparty balance under holder-announced channel reserve value".to_owned()));
}
return Err(ChannelError::Ignore(format!("Cannot send value that would overdraw remaining funds. Amount: {}, pending value to self {}", amount_msat, pending_value_to_self_msat)));
}
- // The `+1` is for the HTLC currently being added to the commitment tx and
- // the `2 *` and `+1` are for the fee spike buffer.
- let commit_tx_fee_msat = if self.channel_outbound {
- 2 * self.next_local_commit_tx_fee_msat(1 + 1)
+ // The `2 *` factor and the `Some(())` extra HTLC are for the fee spike buffer.
+ let commit_tx_fee_msat = if self.is_outbound() {
+ let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
+ 2 * self.next_local_commit_tx_fee_msat(htlc_candidate, Some(()))
} else { 0 };
if pending_value_to_self_msat - amount_msat < commit_tx_fee_msat {
return Err(ChannelError::Ignore(format!("Cannot send value that would not leave enough to pay for fees. Pending value to self: {}. local_commit_tx_fee {}", pending_value_to_self_msat, commit_tx_fee_msat)));
}
self.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
- let (res, counterparty_commitment_tx, htlcs) = match self.send_commitment_no_state_update(logger) {
+ let (res, counterparty_commitment_txid, htlcs) = match self.send_commitment_no_state_update(logger) {
Ok((res, (counterparty_commitment_tx, mut htlcs))) => {
// Update state now that we've passed all the can-fail calls...
let htlcs_no_ref: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
let monitor_update = ChannelMonitorUpdate {
update_id: self.latest_monitor_update_id,
updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
- unsigned_commitment_tx: counterparty_commitment_tx.clone(),
+ commitment_txid: counterparty_commitment_txid,
htlc_outputs: htlcs.clone(),
commitment_number: self.cur_counterparty_commitment_transaction_number,
their_revocation_point: self.counterparty_cur_commitment_point.unwrap()
/// Only fails in case of bad keys. Used for channel_reestablish commitment_signed generation
/// when we shouldn't change HTLC/channel state.
- fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Transaction, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
+ fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
let mut feerate_per_kw = self.feerate_per_kw;
if let Some(feerate) = self.pending_update_fee {
- if self.channel_outbound {
+ if self.is_outbound() {
feerate_per_kw = feerate;
}
}
let counterparty_keys = self.build_remote_transaction_keys()?;
let counterparty_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, feerate_per_kw, logger);
+ let counterparty_commitment_txid = counterparty_commitment_tx.0.trust().txid();
let (signature, htlc_signatures);
+ #[cfg(any(test, feature = "fuzztarget"))]
+ {
+ if !self.is_outbound() {
+ let projected_commit_tx_info = self.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
+ *self.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
+ if let Some(info) = projected_commit_tx_info {
+ let total_pending_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
+ if info.total_pending_htlcs == total_pending_htlcs
+ && info.next_holder_htlc_id == self.next_holder_htlc_id
+ && info.next_counterparty_htlc_id == self.next_counterparty_htlc_id
+ && info.feerate == self.feerate_per_kw {
+ let actual_fee = self.commit_tx_fee_msat(counterparty_commitment_tx.1);
+ assert_eq!(actual_fee, info.fee);
+ }
+ }
+ }
+ }
+
{
let mut htlcs = Vec::with_capacity(counterparty_commitment_tx.2.len());
for &(ref htlc, _) in counterparty_commitment_tx.2.iter() {
htlcs.push(htlc);
}
- let pre_remote_keys = PreCalculatedTxCreationKeys::new(counterparty_keys);
- let res = self.holder_keys.sign_counterparty_commitment(feerate_per_kw, &counterparty_commitment_tx.0, &pre_remote_keys, &htlcs, &self.secp_ctx)
+ let res = self.holder_signer.sign_counterparty_commitment(&counterparty_commitment_tx.0, &self.secp_ctx)
.map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?;
signature = res.0;
htlc_signatures = res.1;
- let counterparty_keys = pre_remote_keys.trust_key_derivation();
- log_trace!(logger, "Signed remote commitment tx {} with redeemscript {} -> {}",
- encode::serialize_hex(&counterparty_commitment_tx.0),
+ log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {}",
+ encode::serialize_hex(&counterparty_commitment_tx.0.trust().built_transaction().transaction),
+ &counterparty_commitment_txid,
encode::serialize_hex(&self.get_funding_redeemscript()),
log_bytes!(signature.serialize_compact()[..]));
for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {}",
- encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_tx.0.txid(), feerate_per_kw, self.holder_selected_contest_delay, htlc, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
- encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, counterparty_keys)),
+ encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, feerate_per_kw, self.get_holder_selected_contest_delay(), htlc, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
+ encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &counterparty_keys)),
log_bytes!(counterparty_keys.broadcaster_htlc_key.serialize()),
log_bytes!(htlc_sig.serialize_compact()[..]));
}
channel_id: self.channel_id,
signature,
htlc_signatures,
- }, (counterparty_commitment_tx.0, counterparty_commitment_tx.2)))
+ }, (counterparty_commitment_txid, counterparty_commitment_tx.2)))
}
/// Adds a pending outbound HTLC to this channel, and creates a signed commitment transaction
_ => {}
}
}
- let funding_txo = if let Some(funding_txo) = self.funding_txo {
+ let funding_txo = if let Some(funding_txo) = self.get_funding_txo() {
// If we haven't yet exchanged funding signatures (ie channel_state < FundingSent),
// returning a channel monitor update here would imply a channel monitor update before
// we even registered the channel monitor to begin with, which is invalid.
}
}
+/// Returns true if `script` is NOT an acceptable shutdown scriptpubkey from the
+/// counterparty, per the BOLT 2 standard-form requirements for mutual close.
+fn is_unsupported_shutdown_script(their_features: &InitFeatures, script: &Script) -> bool {
+ // We restrain shutdown scripts to standard forms to avoid transactions not propagating on the p2p tx-relay network
+
+ // BOLT 2 says we must only send a scriptpubkey of certain standard forms,
+ // which for a BIP-141-compliant witness program is at max 42 bytes in length.
+ // So don't let the remote peer feed us some super fee-heavy script.
+ let is_script_too_long = script.len() > 42;
+ if is_script_too_long {
+ return true;
+ }
+
+ // If the peer signals option_shutdown_anysegwit, accept any witness program whose
+ // leading byte is not OP_0 (i.e. a witness version above v0) without further checks.
+ if their_features.supports_shutdown_anysegwit() && script.is_witness_program() && script.as_bytes()[0] != OP_PUSHBYTES_0.into_u8() {
+ return false;
+ }
+
+ // Otherwise only the four classic standard forms are supported.
+ return !script.is_p2pkh() && !script.is_p2sh() && !script.is_v0_p2wpkh() && !script.is_v0_p2wsh()
+}
+
const SERIALIZATION_VERSION: u8 = 1;
const MIN_SERIALIZATION_VERSION: u8 = 1;
}
}
-impl<ChanSigner: ChannelKeys + Writeable> Writeable for Channel<ChanSigner> {
+impl<Signer: Sign> Writeable for Channel<Signer> {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
// Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
// called but include holding cell updates (and obviously we don't modify self).
self.channel_id.write(writer)?;
(self.channel_state | ChannelState::PeerDisconnected as u32).write(writer)?;
- self.channel_outbound.write(writer)?;
self.channel_value_satoshis.write(writer)?;
self.latest_monitor_update_id.write(writer)?;
- self.holder_keys.write(writer)?;
+ let mut key_data = VecWriter(Vec::new());
+ self.holder_signer.write(&mut key_data)?;
+ assert!(key_data.0.len() < std::usize::MAX);
+ assert!(key_data.0.len() < std::u32::MAX as usize);
+ (key_data.0.len() as u32).write(writer)?;
+ writer.write_all(&key_data.0[..])?;
+
self.shutdown_pubkey.write(writer)?;
self.destination_script.write(writer)?;
None => 0u8.write(writer)?,
}
- self.funding_txo.write(writer)?;
self.funding_tx_confirmed_in.write(writer)?;
self.short_channel_id.write(writer)?;
self.counterparty_selected_channel_reserve_satoshis.write(writer)?;
self.counterparty_htlc_minimum_msat.write(writer)?;
self.holder_htlc_minimum_msat.write(writer)?;
- self.counterparty_selected_contest_delay.write(writer)?;
- self.holder_selected_contest_delay.write(writer)?;
self.counterparty_max_accepted_htlcs.write(writer)?;
self.minimum_depth.write(writer)?;
- self.counterparty_pubkeys.write(writer)?;
+ self.channel_transaction_parameters.write(writer)?;
self.counterparty_cur_commitment_point.write(writer)?;
self.counterparty_prev_commitment_point.write(writer)?;
}
}
-impl<ChanSigner: ChannelKeys + Readable> Readable for Channel<ChanSigner> {
- fn read<R : ::std::io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
+const MAX_ALLOC_SIZE: usize = 64*1024;
+impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel<Signer>
+ where K::Target: KeysInterface<Signer = Signer> {
+ fn read<R : ::std::io::Read>(reader: &mut R, keys_source: &'a K) -> Result<Self, DecodeError> {
let _ver: u8 = Readable::read(reader)?;
let min_ver: u8 = Readable::read(reader)?;
if min_ver > SERIALIZATION_VERSION {
let channel_id = Readable::read(reader)?;
let channel_state = Readable::read(reader)?;
- let channel_outbound = Readable::read(reader)?;
let channel_value_satoshis = Readable::read(reader)?;
let latest_monitor_update_id = Readable::read(reader)?;
- let holder_keys = Readable::read(reader)?;
+ let keys_len: u32 = Readable::read(reader)?;
+ let mut keys_data = Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE));
+ while keys_data.len() != keys_len as usize {
+ // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
+ let mut data = [0; 1024];
+ let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.len())];
+ reader.read_exact(read_slice)?;
+ keys_data.extend_from_slice(read_slice);
+ }
+ let holder_signer = keys_source.read_chan_signer(&keys_data)?;
+
let shutdown_pubkey = Readable::read(reader)?;
let destination_script = Readable::read(reader)?;
_ => return Err(DecodeError::InvalidValue),
};
- let funding_txo = Readable::read(reader)?;
let funding_tx_confirmed_in = Readable::read(reader)?;
let short_channel_id = Readable::read(reader)?;
let counterparty_selected_channel_reserve_satoshis = Readable::read(reader)?;
let counterparty_htlc_minimum_msat = Readable::read(reader)?;
let holder_htlc_minimum_msat = Readable::read(reader)?;
- let counterparty_selected_contest_delay = Readable::read(reader)?;
- let holder_selected_contest_delay = Readable::read(reader)?;
let counterparty_max_accepted_htlcs = Readable::read(reader)?;
let minimum_depth = Readable::read(reader)?;
- let counterparty_pubkeys = Readable::read(reader)?;
+ let channel_parameters = Readable::read(reader)?;
let counterparty_cur_commitment_point = Readable::read(reader)?;
let counterparty_prev_commitment_point = Readable::read(reader)?;
let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
let commitment_secrets = Readable::read(reader)?;
+ let mut secp_ctx = Secp256k1::new();
+ secp_ctx.seeded_randomize(&keys_source.get_secure_random_bytes());
+
Ok(Channel {
user_id,
config,
channel_id,
channel_state,
- channel_outbound,
- secp_ctx: Secp256k1::new(),
+ secp_ctx,
channel_value_satoshis,
latest_monitor_update_id,
- holder_keys,
+ holder_signer,
shutdown_pubkey,
destination_script,
last_sent_closing_fee,
- funding_txo,
funding_tx_confirmed_in,
short_channel_id,
last_block_connected,
counterparty_selected_channel_reserve_satoshis,
counterparty_htlc_minimum_msat,
holder_htlc_minimum_msat,
- counterparty_selected_contest_delay,
- holder_selected_contest_delay,
counterparty_max_accepted_htlcs,
minimum_depth,
- counterparty_pubkeys,
+ channel_transaction_parameters: channel_parameters,
counterparty_cur_commitment_point,
counterparty_prev_commitment_point,
commitment_secrets,
network_sync: UpdateStatus::Fresh,
+
+ #[cfg(any(test, feature = "fuzztarget"))]
+ next_local_commitment_tx_fee_info_cached: Mutex::new(None),
+ #[cfg(any(test, feature = "fuzztarget"))]
+ next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
})
}
}
use bitcoin::hashes::hex::FromHex;
use hex;
use ln::channelmanager::{HTLCSource, PaymentPreimage, PaymentHash};
- use ln::channel::{Channel,ChannelKeys,InboundHTLCOutput,OutboundHTLCOutput,InboundHTLCState,OutboundHTLCState,HTLCOutputInCommitment,TxCreationKeys};
+ use ln::channel::{Channel,Sign,InboundHTLCOutput,OutboundHTLCOutput,InboundHTLCState,OutboundHTLCState,HTLCOutputInCommitment,HTLCCandidate,HTLCInitiator,TxCreationKeys};
use ln::channel::MAX_FUNDING_SATOSHIS;
use ln::features::InitFeatures;
- use ln::msgs::{OptionalField, DataLossProtect};
+ use ln::msgs::{OptionalField, DataLossProtect, DecodeError};
use ln::chan_utils;
- use ln::chan_utils::{HolderCommitmentTransaction, ChannelPublicKeys};
+ use ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters, HTLC_SUCCESS_TX_WEIGHT, HTLC_TIMEOUT_TX_WEIGHT};
use chain::chaininterface::{FeeEstimator,ConfirmationTarget};
- use chain::keysinterface::{InMemoryChannelKeys, KeysInterface};
+ use chain::keysinterface::{InMemorySigner, KeysInterface};
use chain::transaction::OutPoint;
use util::config::UserConfig;
- use util::enforcing_trait_impls::EnforcingChannelKeys;
+ use util::enforcing_trait_impls::EnforcingSigner;
use util::test_utils;
use util::logger::Logger;
use bitcoin::secp256k1::{Secp256k1, Message, Signature, All};
}
struct Keys {
- chan_keys: InMemoryChannelKeys,
+ signer: InMemorySigner,
}
impl KeysInterface for Keys {
- type ChanKeySigner = InMemoryChannelKeys;
+ type Signer = InMemorySigner;
fn get_node_secret(&self) -> SecretKey { panic!(); }
fn get_destination_script(&self) -> Script {
PublicKey::from_secret_key(&secp_ctx, &channel_close_key)
}
- fn get_channel_keys(&self, _inbound: bool, _channel_value_satoshis: u64) -> InMemoryChannelKeys {
- self.chan_keys.clone()
+ fn get_channel_signer(&self, _inbound: bool, _channel_value_satoshis: u64) -> InMemorySigner {
+ self.signer.clone()
}
fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
+ fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::Signer, DecodeError> { panic!(); }
}
fn public_from_secret_hex(secp_ctx: &Secp256k1<All>, hex: &str) -> PublicKey {
let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let node_a_chan = Channel::<EnforcingChannelKeys>::new_outbound(&&fee_est, &&keys_provider, node_a_node_id, 10000000, 100000, 42, &config).unwrap();
+ let node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, node_a_node_id, 10000000, 100000, 42, &config).unwrap();
// Now change the fee so we can check that the fee in the open_channel message is the
// same as the old fee.
assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
}
+ #[test]
+ fn test_holder_vs_counterparty_dust_limit() {
+ // Test that when calculating the local and remote commitment transaction fees, the correct
+ // dust limits are used.
+ let feeest = TestFeeEstimator{fee_est: 15000};
+ let secp_ctx = Secp256k1::new();
+ let seed = [42; 32];
+ let network = Network::Testnet;
+ let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+
+ // Go through the flow of opening a channel between two nodes, making sure
+ // they have different dust limits.
+
+ // Create Node A's channel pointing to Node B's pubkey
+ let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ let config = UserConfig::default();
+ let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, 10000000, 100000, 42, &config).unwrap();
+
+ // Create Node B's channel by receiving Node A's open_channel message
+ // Make sure A's dust limit is as we expect.
+ let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
+ assert_eq!(open_channel_msg.dust_limit_satoshis, 1560);
+ let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
+ let node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, InitFeatures::known(), &open_channel_msg, 7, &config).unwrap();
+
+ // Node B --> Node A: accept channel, explicitly setting B's dust limit.
+ let mut accept_channel_msg = node_b_chan.get_accept_channel();
+ accept_channel_msg.dust_limit_satoshis = 546;
+ node_a_chan.accept_channel(&accept_channel_msg, &config, InitFeatures::known()).unwrap();
+
+ // Put some inbound and outbound HTLCs in A's channel.
+ // NOTE(review): 11_092_000 msat appears chosen to land between B's 546-sat dust limit and
+ // A's 1560-sat limit once the per-HTLC tx fee at 15000 sat/kW is added — confirm against
+ // the dust calculation in next_local_commit_tx_fee_msat.
+ let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
+ node_a_chan.pending_inbound_htlcs.push(InboundHTLCOutput {
+ htlc_id: 0,
+ amount_msat: htlc_amount_msat,
+ payment_hash: PaymentHash(Sha256::hash(&[42; 32]).into_inner()),
+ cltv_expiry: 300000000,
+ state: InboundHTLCState::Committed,
+ });
+
+ node_a_chan.pending_outbound_htlcs.push(OutboundHTLCOutput {
+ htlc_id: 1,
+ amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
+ payment_hash: PaymentHash(Sha256::hash(&[43; 32]).into_inner()),
+ cltv_expiry: 200000000,
+ state: OutboundHTLCState::Committed,
+ source: HTLCSource::OutboundRoute {
+ path: Vec::new(),
+ session_priv: SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
+ first_hop_htlc_msat: 548,
+ }
+ });
+
+ // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
+ // the dust limit check: the fee with the candidate equals the zero-HTLC fee.
+ let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
+ let local_commit_tx_fee = node_a_chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
+ let local_commit_fee_0_htlcs = node_a_chan.commit_tx_fee_msat(0);
+ assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
+
+ // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
+ // of the HTLCs are seen to be above the dust limit.
+ // NOTE(review): flipping is_outbound_from_holder makes the fee helpers treat the channel as
+ // inbound — confirm this is the intended way to exercise the remote-fee path here.
+ node_a_chan.channel_transaction_parameters.is_outbound_from_holder = false;
+ // 3 HTLCs = the two pending ones pushed above plus the new candidate.
+ let remote_commit_fee_3_htlcs = node_a_chan.commit_tx_fee_msat(3);
+ let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
+ let remote_commit_tx_fee = node_a_chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
+ assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
+ }
+
+ #[test]
+ fn test_timeout_vs_success_htlc_dust_limit() {
+ // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
+ // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
+ // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
+ // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
+ // The 253 sat/kW feerate below is reused as the literal `253` in the threshold
+ // arithmetic, so each HTLC's claim-tx fee is exactly `253 * WEIGHT / 1000` sats.
+ let fee_est = TestFeeEstimator{fee_est: 253 };
+ let secp_ctx = Secp256k1::new();
+ let seed = [42; 32];
+ let network = Network::Testnet;
+ let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+
+ let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ let config = UserConfig::default();
+ let mut chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, node_id, 10000000, 100000, 42, &config).unwrap();
+
+ let commitment_tx_fee_0_htlcs = chan.commit_tx_fee_msat(0);
+ let commitment_tx_fee_1_htlc = chan.commit_tx_fee_msat(1);
+
+ // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
+ // counted as dust when it shouldn't be.
+ // One sat above the timeout-tx dust threshold on the holder's commitment (offered HTLC).
+ let htlc_amt_above_timeout = ((253 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + chan.holder_dust_limit_satoshis + 1) * 1000;
+ let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
+ let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
+ assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
+
+ // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
+ // One sat below the success-tx dust threshold (received HTLC), so it must stay dust.
+ let dust_htlc_amt_below_success = ((253 * HTLC_SUCCESS_TX_WEIGHT / 1000) + chan.holder_dust_limit_satoshis - 1) * 1000;
+ let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
+ let commitment_tx_fee = chan.next_local_commit_tx_fee_msat(htlc_candidate, None);
+ assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
+
+ // Switch perspective so the remote-fee helper applies, using the counterparty's dust limit.
+ chan.channel_transaction_parameters.is_outbound_from_holder = false;
+
+ // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
+ // On the counterparty's commitment an HTLC we offer is claimed via a success tx by them,
+ // so a timeout-weight-based amount just above our threshold remains dust there.
+ let dust_htlc_amt_above_timeout = ((253 * HTLC_TIMEOUT_TX_WEIGHT / 1000) + chan.counterparty_dust_limit_satoshis + 1) * 1000;
+ let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
+ let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
+ assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
+
+ // If swapped: this HTLC would be counted as dust when it shouldn't be.
+ let htlc_amt_below_success = ((253 * HTLC_SUCCESS_TX_WEIGHT / 1000) + chan.counterparty_dust_limit_satoshis - 1) * 1000;
+ let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
+ let commitment_tx_fee = chan.next_remote_commit_tx_fee_msat(htlc_candidate, None);
+ assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
+ }
+
#[test]
fn channel_reestablish_no_updates() {
let feeest = TestFeeEstimator{fee_est: 15000};
// Create Node A's channel pointing to Node B's pubkey
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let config = UserConfig::default();
- let mut node_a_chan = Channel::<EnforcingChannelKeys>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, 10000000, 100000, 42, &config).unwrap();
+ let mut node_a_chan = Channel::<EnforcingSigner>::new_outbound(&&feeest, &&keys_provider, node_b_node_id, 10000000, 100000, 42, &config).unwrap();
// Create Node B's channel by receiving Node A's open_channel message
let open_channel_msg = node_a_chan.get_open_channel(genesis_block(network).header.block_hash());
let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
- let mut node_b_chan = Channel::<EnforcingChannelKeys>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, InitFeatures::known(), &open_channel_msg, 7, &config).unwrap();
+ let mut node_b_chan = Channel::<EnforcingSigner>::new_from_req(&&feeest, &&keys_provider, node_b_node_id, InitFeatures::known(), &open_channel_msg, 7, &config).unwrap();
// Node B --> Node A: accept channel
let accept_channel_msg = node_b_chan.get_accept_channel();
let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
let secp_ctx = Secp256k1::new();
- let mut chan_keys = InMemoryChannelKeys::new(
+ let mut signer = InMemorySigner::new(
&secp_ctx,
SecretKey::from_slice(&hex::decode("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
SecretKey::from_slice(&hex::decode("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
// These aren't set in the test vectors:
[0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
10_000_000,
- (0, 0)
+ [0; 32]
);
- assert_eq!(chan_keys.pubkeys().funding_pubkey.serialize()[..],
+ assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
hex::decode("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
- let keys_provider = Keys { chan_keys: chan_keys.clone() };
+ let keys_provider = Keys { signer: signer.clone() };
let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
let mut config = UserConfig::default();
config.channel_options.announced_channel = false;
- let mut chan = Channel::<InMemoryChannelKeys>::new_outbound(&&feeest, &&keys_provider, counterparty_node_id, 10_000_000, 100000, 42, &config).unwrap(); // Nothing uses their network key in this test
- chan.counterparty_selected_contest_delay = 144;
+ let mut chan = Channel::<InMemorySigner>::new_outbound(&&feeest, &&keys_provider, counterparty_node_id, 10_000_000, 100000, 42, &config).unwrap(); // Nothing uses their network key in this test
chan.holder_dust_limit_satoshis = 546;
let funding_info = OutPoint{ txid: Txid::from_hex("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
- chan.funding_txo = Some(funding_info);
let counterparty_pubkeys = ChannelPublicKeys {
funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
delayed_payment_basepoint: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
htlc_basepoint: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444")
};
- chan_keys.on_accept(&counterparty_pubkeys, chan.counterparty_selected_contest_delay, chan.holder_selected_contest_delay);
+ chan.channel_transaction_parameters.counterparty_parameters = Some(
+ CounterpartyChannelTransactionParameters {
+ pubkeys: counterparty_pubkeys.clone(),
+ selected_contest_delay: 144
+ });
+ chan.channel_transaction_parameters.funding_outpoint = Some(funding_info);
+ signer.ready_channel(&chan.channel_transaction_parameters);
assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
hex::decode("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
// We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
// derived from a commitment_seed, so instead we copy it here and call
// build_commitment_transaction.
- let delayed_payment_base = &chan.holder_keys.pubkeys().delayed_payment_basepoint;
+ let delayed_payment_base = &chan.holder_signer.pubkeys().delayed_payment_basepoint;
let per_commitment_secret = SecretKey::from_slice(&hex::decode("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
- let htlc_basepoint = &chan.holder_keys.pubkeys().htlc_basepoint;
+ let htlc_basepoint = &chan.holder_signer.pubkeys().htlc_basepoint;
let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint).unwrap();
- chan.counterparty_pubkeys = Some(counterparty_pubkeys);
-
- let mut unsigned_tx: (Transaction, Vec<HTLCOutputInCommitment>);
-
- let mut holdertx;
macro_rules! test_commitment {
( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, {
$( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
} ) => { {
- unsigned_tx = {
+ let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
let mut res = chan.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, chan.feerate_per_kw, &logger);
+
let htlcs = res.2.drain(..)
.filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
.collect();
(res.0, htlcs)
};
+ let trusted_tx = commitment_tx.trust();
+ let unsigned_tx = trusted_tx.built_transaction();
let redeemscript = chan.get_funding_redeemscript();
let counterparty_signature = Signature::from_der(&hex::decode($counterparty_sig_hex).unwrap()[..]).unwrap();
- let sighash = Message::from_slice(&bip143::SigHashCache::new(&unsigned_tx.0).signature_hash(0, &redeemscript, chan.channel_value_satoshis, SigHashType::All)[..]).unwrap();
+ let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.channel_value_satoshis);
secp_ctx.verify(&sighash, &counterparty_signature, chan.counterparty_funding_pubkey()).unwrap();
- let mut per_htlc = Vec::new();
+ let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
+ let mut counterparty_htlc_sigs = Vec::new();
+ counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
$({
let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
- per_htlc.push((unsigned_tx.1[$htlc_idx].clone(), Some(remote_signature)));
+ per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
+ counterparty_htlc_sigs.push(remote_signature);
})*
- assert_eq!(unsigned_tx.1.len(), per_htlc.len());
+ assert_eq!(htlcs.len(), per_htlc.len());
- holdertx = HolderCommitmentTransaction::new_missing_holder_sig(unsigned_tx.0.clone(), counterparty_signature.clone(), &chan_keys.pubkeys().funding_pubkey, chan.counterparty_funding_pubkey(), keys.clone(), chan.feerate_per_kw, per_htlc);
- let holder_sig = chan_keys.sign_holder_commitment(&holdertx, &chan.secp_ctx).unwrap();
- assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig);
+ let holder_commitment_tx = HolderCommitmentTransaction::new(
+ commitment_tx.clone(),
+ counterparty_signature,
+ counterparty_htlc_sigs,
+ &chan.holder_signer.pubkeys().funding_pubkey,
+ chan.counterparty_funding_pubkey()
+ );
+ let (holder_sig, htlc_sigs) = signer.sign_holder_commitment_and_htlcs(&holder_commitment_tx, &secp_ctx).unwrap();
+ assert_eq!(Signature::from_der(&hex::decode($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
- assert_eq!(serialize(&holdertx.add_holder_sig(&redeemscript, holder_sig))[..],
- hex::decode($tx_hex).unwrap()[..]);
+ let funding_redeemscript = chan.get_funding_redeemscript();
+ let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
+ assert_eq!(serialize(&tx)[..], hex::decode($tx_hex).unwrap()[..], "tx");
- let htlc_sigs = chan_keys.sign_holder_commitment_htlc_transactions(&holdertx, &chan.secp_ctx).unwrap();
- let mut htlc_sig_iter = holdertx.per_htlc.iter().zip(htlc_sigs.iter().enumerate());
+ // ((htlc, counterparty_sig), (index, holder_sig))
+ let mut htlc_sig_iter = holder_commitment_tx.htlcs().iter().zip(&holder_commitment_tx.counterparty_htlc_sigs).zip(htlc_sigs.iter().enumerate());
$({
let remote_signature = Signature::from_der(&hex::decode($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
- let ref htlc = unsigned_tx.1[$htlc_idx];
- let htlc_tx = chan.build_htlc_transaction(&unsigned_tx.0.txid(), &htlc, true, &keys, chan.feerate_per_kw);
+ let ref htlc = htlcs[$htlc_idx];
+ let htlc_tx = chan.build_htlc_transaction(&unsigned_tx.txid, &htlc, true, &keys, chan.feerate_per_kw);
let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &keys);
let htlc_sighash = Message::from_slice(&bip143::SigHashCache::new(&htlc_tx).signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, SigHashType::All)[..]).unwrap();
secp_ctx.verify(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key).unwrap();
assert!(preimage.is_some());
}
- let mut htlc_sig = htlc_sig_iter.next().unwrap();
- while (htlc_sig.1).1.is_none() { htlc_sig = htlc_sig_iter.next().unwrap(); }
- assert_eq!((htlc_sig.0).0.transaction_output_index, Some($htlc_idx));
+ let htlc_sig = htlc_sig_iter.next().unwrap();
+ assert_eq!((htlc_sig.0).0.transaction_output_index, Some($htlc_idx), "output index");
let signature = Signature::from_der(&hex::decode($htlc_sig_hex).unwrap()[..]).unwrap();
- assert_eq!(Some(signature), *(htlc_sig.1).1);
- assert_eq!(serialize(&holdertx.get_signed_htlc_tx((htlc_sig.1).0, &(htlc_sig.1).1.unwrap(), &preimage, chan.counterparty_selected_contest_delay))[..],
- hex::decode($htlc_tx_hex).unwrap()[..]);
+ assert_eq!(signature, *(htlc_sig.1).1, "htlc sig");
+ let index = (htlc_sig.1).0;
+ let channel_parameters = chan.channel_transaction_parameters.as_holder_broadcastable();
+ let trusted_tx = holder_commitment_tx.trust();
+ assert_eq!(serialize(&trusted_tx.get_signed_htlc_tx(&channel_parameters, index, &(htlc_sig.0).1, (htlc_sig.1).1, &preimage))[..],
+ hex::decode($htlc_tx_hex).unwrap()[..], "htlc tx");
})*
- loop {
- let htlc_sig = htlc_sig_iter.next();
- if htlc_sig.is_none() { break; }
- assert!((htlc_sig.unwrap().1).1.is_none());
- }
+ assert!(htlc_sig_iter.next().is_none());
} }
}
test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
"304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
"02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
+
+ // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
+ chan.value_to_self_msat = 7_000_000_000 - 2_000_000;
+ chan.feerate_per_kw = 253;
+ chan.pending_inbound_htlcs.clear();
+ chan.pending_inbound_htlcs.push({
+ let mut out = InboundHTLCOutput{
+ htlc_id: 1,
+ amount_msat: 2000000,
+ cltv_expiry: 501,
+ payment_hash: PaymentHash([0; 32]),
+ state: InboundHTLCState::Committed,
+ };
+ out.payment_hash.0 = Sha256::hash(&hex::decode("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).into_inner();
+ out
+ });
+ chan.pending_outbound_htlcs.clear();
+ chan.pending_outbound_htlcs.push({
+ let mut out = OutboundHTLCOutput{
+ htlc_id: 6,
+ amount_msat: 5000000,
+ cltv_expiry: 506,
+ payment_hash: PaymentHash([0; 32]),
+ state: OutboundHTLCState::Committed,
+ source: HTLCSource::dummy(),
+ };
+ out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
+ out
+ });
+ chan.pending_outbound_htlcs.push({
+ let mut out = OutboundHTLCOutput{
+ htlc_id: 5,
+ amount_msat: 5000000,
+ cltv_expiry: 505,
+ payment_hash: PaymentHash([0; 32]),
+ state: OutboundHTLCState::Committed,
+ source: HTLCSource::dummy(),
+ };
+ out.payment_hash.0 = Sha256::hash(&hex::decode("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).into_inner();
+ out
+ });
+
+ test_commitment!("30440220048705bec5288d28b3f29344b8d124853b1af423a568664d2c6f02c8ea886525022060f998a461052a2476b912db426ea2a06700953a241135c7957f2e79bc222df9",
+ "3045022100c4f1d60b6fca9febc8b39de1a31e84c5f7c4b41c97239ef05f4350aa484c6b5e02200c5134ac8b20eb7a29d0dd4a501f6aa8fefb8489171f4cb408bd2a32324ab03f",
+ "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a79f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c4f1d60b6fca9febc8b39de1a31e84c5f7c4b41c97239ef05f4350aa484c6b5e02200c5134ac8b20eb7a29d0dd4a501f6aa8fefb8489171f4cb408bd2a32324ab03f014730440220048705bec5288d28b3f29344b8d124853b1af423a568664d2c6f02c8ea886525022060f998a461052a2476b912db426ea2a06700953a241135c7957f2e79bc222df901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
+
+ { 0,
+ "304502210081cbb94121761d34c189cd4e6a281feea6f585060ad0ba2632e8d6b3c6bb8a6c02201007981bbd16539d63df2805b5568f1f5688cd2a885d04706f50db9b77ba13c6",
+ "304502210090ed76aeb21b53236a598968abc66e2024691d07b62f53ddbeca8f93144af9c602205f873af5a0c10e62690e9aba09740550f194a9dc455ba4c1c23f6cde7704674c",
+ "0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc34000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050048304502210081cbb94121761d34c189cd4e6a281feea6f585060ad0ba2632e8d6b3c6bb8a6c02201007981bbd16539d63df2805b5568f1f5688cd2a885d04706f50db9b77ba13c60148304502210090ed76aeb21b53236a598968abc66e2024691d07b62f53ddbeca8f93144af9c602205f873af5a0c10e62690e9aba09740550f194a9dc455ba4c1c23f6cde7704674c012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
+ { 1,
+ "304402201d0f09d2bf7bc245a4f17980e1e9164290df16c70c6a2ff1592f5030d6108581022061e744a7dc151b36bf0aff7a4f1812ba90b8b03633bb979a270d19858fd960c5",
+ "30450221009aef000d2e843a4202c1b1a2bf554abc9a7902bf49b2cb0759bc507456b7ebad02204e7c3d193ede2fd2b4cd6b39f51a920e581e35575e357e44d7b699c40ce61d39",
+ "0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc3401000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402201d0f09d2bf7bc245a4f17980e1e9164290df16c70c6a2ff1592f5030d6108581022061e744a7dc151b36bf0aff7a4f1812ba90b8b03633bb979a270d19858fd960c5014830450221009aef000d2e843a4202c1b1a2bf554abc9a7902bf49b2cb0759bc507456b7ebad02204e7c3d193ede2fd2b4cd6b39f51a920e581e35575e357e44d7b699c40ce61d3901008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
+ { 2,
+ "30440220010bf035d5823596e50dce2076a4d9f942d8d28031c9c428b901a02b6b8140de02203250e8e4a08bc5b4ecdca4d0eedf98223e02e3ac1c0206b3a7ffdb374aa21e5f",
+ "30440220073de0067b88e425b3018b30366bfeda0ccb703118ccd3d02ead08c0f53511d002203fac50ac0e4cf8a3af0b4b1b12e801650591f748f8ddf1e089c160f10b69e511",
+ "0200000000010189a326e23addc28323dbadcb4e71c2c17088b6e8fa184103e552f44075dddc3402000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220010bf035d5823596e50dce2076a4d9f942d8d28031c9c428b901a02b6b8140de02203250e8e4a08bc5b4ecdca4d0eedf98223e02e3ac1c0206b3a7ffdb374aa21e5f014730440220073de0067b88e425b3018b30366bfeda0ccb703118ccd3d02ead08c0f53511d002203fac50ac0e4cf8a3af0b4b1b12e801650591f748f8ddf1e089c160f10b69e51101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
+ } );
}
#[test]
use chain::channelmonitor;
use chain::channelmonitor::{ChannelMonitor, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY};
use chain::transaction::OutPoint;
-use chain::keysinterface::{ChannelKeys, KeysInterface, SpendableOutputDescriptor};
+use chain::keysinterface::{Sign, KeysInterface};
use ln::channel::{COMMITMENT_TX_BASE_WEIGHT, COMMITMENT_TX_WEIGHT_PER_HTLC};
use ln::channelmanager::{ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentPreimage, PaymentHash, PaymentSecret, PaymentSendFailure, BREAKDOWN_TIMEOUT};
use ln::channel::{Channel, ChannelError};
use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
use ln::msgs;
use ln::msgs::{ChannelMessageHandler,RoutingMessageHandler,HTLCFailChannelUpdate, ErrorAction};
-use util::enforcing_trait_impls::EnforcingChannelKeys;
+use util::enforcing_trait_impls::EnforcingSigner;
use util::{byte_utils, test_utils};
use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
use util::errors::APIError;
-use util::ser::{Writeable, ReadableArgs, Readable};
+use util::ser::{Writeable, ReadableArgs};
use util::config::UserConfig;
use bitcoin::hashes::sha256d::Hash as Sha256dHash;
-use bitcoin::hashes::HashEngine;
-use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash};
-use bitcoin::util::bip143;
-use bitcoin::util::address::Address;
-use bitcoin::util::bip32::{ChildNumber, ExtendedPubKey, ExtendedPrivKey};
+use bitcoin::hash_types::{Txid, BlockHash};
use bitcoin::blockdata::block::{Block, BlockHeader};
-use bitcoin::blockdata::transaction::{Transaction, TxOut, TxIn, SigHashType, OutPoint as BitcoinOutPoint};
-use bitcoin::blockdata::script::{Builder, Script};
+use bitcoin::blockdata::script::Builder;
use bitcoin::blockdata::opcodes;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;
use std::collections::{BTreeSet, HashMap, HashSet};
use std::default::Default;
-use std::sync::{Arc, Mutex};
+use std::sync::Mutex;
use std::sync::atomic::Ordering;
use std::mem;
use ln::functional_test_utils::*;
-use ln::chan_utils::PreCalculatedTxCreationKeys;
+use ln::chan_utils::CommitmentTransaction;
+use ln::msgs::OptionalField::Present;
#[test]
fn test_insane_channel_opens() {
// Instantiate channel parameters where we push the maximum msats given our
// funding satoshis
let channel_value_sat = 31337; // same as funding satoshis
- let channel_reserve_satoshis = Channel::<EnforcingChannelKeys>::get_holder_selected_channel_reserve_satoshis(channel_value_sat);
+ let channel_reserve_satoshis = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_sat);
let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
// Have node0 initiate a channel to node1 with aforementioned parameters
nodes[0].node.close_channel(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id()).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
nodes[0].node.close_channel(&chan_1.2).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
nodes[1].node.close_channel(&chan_1.2).unwrap();
let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed);
check_added_monitors!(nodes[1], 1);
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[1].node.close_channel(&chan_1.2).unwrap();
let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
if recv_count > 0 {
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
if recv_count > 1 {
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
}
}
nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &node_1_reestablish);
let node_0_2nd_shutdown = if recv_count > 0 {
let node_0_2nd_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown);
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
node_0_2nd_shutdown
} else {
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_2nd_shutdown);
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_2nd_shutdown);
get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())
};
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_2nd_shutdown);
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_2nd_shutdown);
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
let node_1_3rd_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
assert!(node_1_3rd_shutdown == node_1_2nd_shutdown);
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_3rd_shutdown);
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_3rd_shutdown);
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_3rd_shutdown);
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_1_3rd_shutdown);
let node_0_2nd_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
assert!(node_0_closing_signed == node_0_2nd_closing_signed);
let route = get_route(&nodes[0].node.get_our_node_id(), net_graph_msg_handler, &nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV, &logger).unwrap();
(route, payment_hash, payment_preimage)
}}
- };
+ }
let (route, payment_hash, _) = get_route_and_payment_hash!(3460001);
// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
- // Get the EnforcingChannelKeys for each channel, which will be used to (1) get the keys
+ // Get the EnforcingSigner for each channel, which will be used to (1) get the keys
// needed to sign the new commitment tx and (2) sign the new commitment tx.
- let (local_revocation_basepoint, local_htlc_basepoint, local_payment_point, local_secret, local_secret2) = {
+ let (local_revocation_basepoint, local_htlc_basepoint, local_secret, next_local_point) = {
let chan_lock = nodes[0].node.channel_state.lock().unwrap();
let local_chan = chan_lock.by_id.get(&chan.2).unwrap();
- let chan_keys = local_chan.get_keys();
- let pubkeys = chan_keys.pubkeys();
- (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint, pubkeys.payment_point,
- chan_keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER), chan_keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2))
+ let chan_signer = local_chan.get_signer();
+ let pubkeys = chan_signer.pubkeys();
+ (pubkeys.revocation_basepoint, pubkeys.htlc_basepoint,
+ chan_signer.release_commitment_secret(INITIAL_COMMITMENT_NUMBER),
+ chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 2, &secp_ctx))
};
- let (remote_delayed_payment_basepoint, remote_htlc_basepoint, remote_payment_point, remote_secret1) = {
+ let (remote_delayed_payment_basepoint, remote_htlc_basepoint,remote_point) = {
let chan_lock = nodes[1].node.channel_state.lock().unwrap();
let remote_chan = chan_lock.by_id.get(&chan.2).unwrap();
- let chan_keys = remote_chan.get_keys();
- let pubkeys = chan_keys.pubkeys();
- (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint, pubkeys.payment_point,
- chan_keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1))
+ let chan_signer = remote_chan.get_signer();
+ let pubkeys = chan_signer.pubkeys();
+ (pubkeys.delayed_payment_basepoint, pubkeys.htlc_basepoint,
+ chan_signer.get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &secp_ctx))
};
// Assemble the set of keys we can use for signatures for our commitment_signed message.
- let commitment_secret = SecretKey::from_slice(&remote_secret1).unwrap();
- let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &commitment_secret);
- let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, &remote_delayed_payment_basepoint,
+ let commit_tx_keys = chan_utils::TxCreationKeys::derive_new(&secp_ctx, &remote_point, &remote_delayed_payment_basepoint,
&remote_htlc_basepoint, &local_revocation_basepoint, &local_htlc_basepoint).unwrap();
// Build the remote commitment transaction so we can sign it, and then later use the
// signature for the commitment_signed message.
let local_chan_balance = 1313;
- let static_payment_pk = local_payment_point.serialize();
- let remote_commit_tx_output = TxOut {
- script_pubkey: Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0)
- .push_slice(&WPubkeyHash::hash(&static_payment_pk)[..])
- .into_script(),
- value: local_chan_balance as u64
- };
-
- let local_commit_tx_output = TxOut {
- script_pubkey: chan_utils::get_revokeable_redeemscript(&commit_tx_keys.revocation_key,
- BREAKDOWN_TIMEOUT,
- &commit_tx_keys.broadcaster_delayed_payment_key).to_v0_p2wsh(),
- value: 95000,
- };
let accepted_htlc_info = chan_utils::HTLCOutputInCommitment {
offered: false,
amount_msat: 3460001,
cltv_expiry: htlc_cltv,
- payment_hash: payment_hash,
+ payment_hash,
transaction_output_index: Some(1),
};
- let htlc_output = TxOut {
- script_pubkey: chan_utils::get_htlc_redeemscript(&accepted_htlc_info, &commit_tx_keys).to_v0_p2wsh(),
- value: 3460001 / 1000
- };
-
- let commit_tx_obscure_factor = {
- let mut sha = Sha256::engine();
- let remote_payment_point = &remote_payment_point.serialize();
- sha.input(&local_payment_point.serialize());
- sha.input(remote_payment_point);
- let res = Sha256::from_engine(sha).into_inner();
-
- ((res[26] as u64) << 5*8) |
- ((res[27] as u64) << 4*8) |
- ((res[28] as u64) << 3*8) |
- ((res[29] as u64) << 2*8) |
- ((res[30] as u64) << 1*8) |
- ((res[31] as u64) << 0*8)
- };
- let commitment_number = 1;
- let obscured_commitment_transaction_number = commit_tx_obscure_factor ^ commitment_number;
- let lock_time = ((0x20 as u32) << 8*3) | ((obscured_commitment_transaction_number & 0xffffffu64) as u32);
- let input = TxIn {
- previous_output: BitcoinOutPoint { txid: chan.3.txid(), vout: 0 },
- script_sig: Script::new(),
- sequence: ((0x80 as u32) << 8*3) | ((obscured_commitment_transaction_number >> 3*8) as u32),
- witness: Vec::new(),
- };
+ let commitment_number = INITIAL_COMMITMENT_NUMBER - 1;
- let commit_tx = Transaction {
- version: 2,
- lock_time,
- input: vec![input],
- output: vec![remote_commit_tx_output, htlc_output, local_commit_tx_output],
- };
let res = {
let local_chan_lock = nodes[0].node.channel_state.lock().unwrap();
let local_chan = local_chan_lock.by_id.get(&chan.2).unwrap();
- let local_chan_keys = local_chan.get_keys();
- let pre_commit_tx_keys = PreCalculatedTxCreationKeys::new(commit_tx_keys);
- local_chan_keys.sign_counterparty_commitment(feerate_per_kw, &commit_tx, &pre_commit_tx_keys, &[&accepted_htlc_info], &secp_ctx).unwrap()
+ let local_chan_signer = local_chan.get_signer();
+ let commitment_tx = CommitmentTransaction::new_with_auxiliary_htlc_data(
+ commitment_number,
+ 95000,
+ local_chan_balance,
+ commit_tx_keys.clone(),
+ feerate_per_kw,
+ &mut vec![(accepted_htlc_info, ())],
+ &local_chan.channel_transaction_parameters.as_counterparty_broadcastable()
+ );
+ local_chan_signer.sign_counterparty_commitment(&commitment_tx, &secp_ctx).unwrap()
};
let commit_signed_msg = msgs::CommitmentSigned {
let _ = nodes[1].node.get_and_clear_pending_msg_events();
// Send the RAA to nodes[1].
- let per_commitment_secret = local_secret;
- let next_secret = SecretKey::from_slice(&local_secret2).unwrap();
- let next_per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &next_secret);
- let raa_msg = msgs::RevokeAndACK{ channel_id: chan.2, per_commitment_secret, next_per_commitment_point};
+ let raa_msg = msgs::RevokeAndACK {
+ channel_id: chan.2,
+ per_commitment_secret: local_secret,
+ next_per_commitment_point: next_local_point
+ };
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &raa_msg);
let events = nodes[1].node.get_and_clear_pending_msg_events();
fn test_chan_reserve_violation_outbound_htlc_inbound_chan() {
let mut chanmon_cfgs = create_chanmon_cfgs(2);
// Set the fee rate for the channel very high, to the point where the fundee
- // sending any amount would result in a channel reserve violation. In this test
- // we check that we would be prevented from sending an HTLC in this situation.
+ // sending any above-dust amount would result in a channel reserve violation.
+ // In this test we check that we would be prevented from sending an HTLC in
+ // this situation.
chanmon_cfgs[0].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 6000 };
chanmon_cfgs[1].fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 6000 };
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes.first().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV, &logger).unwrap();
(route, payment_hash, payment_preimage)
}}
- };
+ }
- let (route, our_payment_hash, _) = get_route_and_payment_hash!(1000);
+ let (route, our_payment_hash, _) = get_route_and_payment_hash!(4843000);
unwrap_send_err!(nodes[1].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
assert_eq!(err, "Cannot send value that would put counterparty balance under holder-announced channel reserve value"));
assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
let route = get_route(&nodes[1].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes.first().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV, &logger).unwrap();
(route, payment_hash, payment_preimage)
}}
- };
+ }
let (route, payment_hash, _) = get_route_and_payment_hash!(1000);
// Need to manually create the update_add_htlc message to go around the channel reserve check in send_htlc()
check_added_monitors!(nodes[0], 1);
}
+#[test]
+fn test_chan_reserve_dust_inbound_htlcs_outbound_chan() {
+	// Test that if we receive many dust HTLCs over an outbound channel, they don't count when
+	// calculating our commitment transaction fee (this was previously broken).
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	// Two nodes, so exactly two per-node config slots (the previous three-element slice's
+	// extra entry was silently ignored).
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	// Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
+	// channel reserve violation (so their balance is channel reserve (1000 sats) + commitment
+	// transaction fee with 0 HTLCs (183 sats)).
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98817000, InitFeatures::known(), InitFeatures::known());
+
+	let dust_amt = 546000; // Dust amount
+	// In the previous code, routing this dust payment would cause nodes[0] to perceive a channel
+	// reserve violation even though it's a dust HTLC and therefore shouldn't count towards the
+	// commitment transaction fee.
+	let (_, _) = route_payment(&nodes[1], &[&nodes[0]], dust_amt);
+}
+
+#[test]
+fn test_chan_reserve_dust_inbound_htlcs_inbound_chan() {
+	// Test that if we receive many dust HTLCs over an inbound channel, they don't count when
+	// calculating our counterparty's commitment transaction fee (this was previously broken).
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	// Two nodes, so exactly two per-node config slots (the previous three-element slice's
+	// extra entry was silently ignored).
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 98000000, InitFeatures::known(), InitFeatures::known());
+
+	let payment_amt = 46000; // Dust amount
+	// In the previous code, these first four payments would succeed.
+	let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+	let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+	let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+	let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+
+	// Then these next 5 would be interpreted by nodes[1] as violating the fee spike buffer.
+	let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+	let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+	let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+	let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+	let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+
+	// And this last payment previously resulted in nodes[1] closing on its inbound-channel
+	// counterparty, because it counted all the previous dust HTLCs against nodes[0]'s commitment
+	// transaction fee and therefore perceived this next payment as a channel reserve violation.
+	let (_, _) = route_payment(&nodes[0], &[&nodes[1]], payment_amt);
+}
+
#[test]
fn test_chan_reserve_violation_inbound_htlc_inbound_chan() {
let chanmon_cfgs = create_chanmon_cfgs(3);
let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV, &logger).unwrap();
(route, payment_hash, payment_preimage)
}}
- };
+ }
let feemsat = 239;
let total_routing_fee_msat = (nodes.len() - 2) as u64 * feemsat;
let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes.last().unwrap().node.get_our_node_id(), None, &Vec::new(), $recv_value, TEST_FINAL_CLTV, &logger).unwrap();
(route, payment_hash, payment_preimage)
}}
- };
+ }
macro_rules! expect_forward {
($node: expr) => {{
// attempt to send amt_msat > their_max_htlc_value_in_flight_msat
{
- let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0 + 1);
+ let (mut route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_0);
+ route.paths[0].last_mut().unwrap().fee_msat += 1;
assert!(route.paths[0].iter().rev().skip(1).all(|h| h.fee_msat == feemsat));
unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
let commit_tx_fee_0_htlcs = 2*commit_tx_fee_msat(feerate, 1);
let recv_value_3 = commit_tx_fee_2_htlcs - commit_tx_fee_0_htlcs - total_fee_msat;
- {
- let (route, our_payment_hash, _) = get_route_and_payment_hash!(recv_value_3 + 1);
- let err = nodes[0].node.send_payment(&route, our_payment_hash, &None).err().unwrap();
- match err {
- PaymentSendFailure::AllFailedRetrySafe(ref fails) => {
- match &fails[0] {
- &APIError::ChannelUnavailable{ref err} =>
- assert!(regex::Regex::new(r"Cannot send value that would put our balance under counterparty-announced channel reserve value \(\d+\)").unwrap().is_match(err)),
- _ => panic!("Unexpected error variant"),
- }
- },
- _ => panic!("Unexpected error variant"),
- }
- assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
- nodes[0].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Cannot send value that would put our balance under counterparty-announced channel reserve value".to_string(), 3);
- }
-
send_payment(&nodes[0], &vec![&nodes[1], &nodes[2]][..], recv_value_3, recv_value_3);
let commit_tx_fee_1_htlc = 2*commit_tx_fee_msat(feerate, 1 + 1);
bob_config.peer_channel_config_limits.force_announced_channel_preference = false;
bob_config.own_channel_config.our_to_self_delay = 6 * 24 * 3;
let user_cfgs = [Some(alice_config), Some(bob_config)];
- let chanmon_cfgs = create_chanmon_cfgs(2);
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
+ chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &user_cfgs);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
#[test]
fn claim_htlc_outputs_shared_tx() {
// Node revoked old state, htlcs haven't time out yet, claim them in shared justice tx
- let chanmon_cfgs = create_chanmon_cfgs(2);
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
#[test]
fn claim_htlc_outputs_single_tx() {
// Node revoked old state, htlcs have timed out, claim each of them in separated justice tx
- let chanmon_cfgs = create_chanmon_cfgs(2);
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
route_payment(&nodes[0], &[&nodes[1]], 10000000);
- nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id);
+ nodes[0].node.force_close_channel(&nodes[0].node.list_channels()[0].channel_id).unwrap();
check_closed_broadcast!(nodes[0], false);
check_added_monitors!(nodes[0], 1);
// state or updated nodes[1]' state. Now force-close and broadcast that commitment/HTLC
// transaction and ensure nodes[1] doesn't fail-backwards (this was originally a bug!).
- nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id);
+ nodes[2].node.force_close_channel(&payment_event.commitment_msg.channel_id).unwrap();
check_closed_broadcast!(nodes[2], false);
check_added_monitors!(nodes[2], 1);
let tx = {
// Now check that if we add the preimage to ChannelMonitor it broadcasts our HTLC-Success..
{
- let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.lock().unwrap();
- monitors.get_mut(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap()
+ let mut monitors = nodes[2].chain_monitor.chain_monitor.monitors.read().unwrap();
+ monitors.get(&OutPoint{ txid: Txid::from_slice(&payment_event.commitment_msg.channel_id[..]).unwrap(), index: 0 }).unwrap()
.provide_payment_preimage(&our_payment_hash, &our_payment_preimage, &node_cfgs[2].tx_broadcaster, &node_cfgs[2].fee_estimator, &&logger);
}
connect_block(&nodes[2], &block, 1);
nodes[0].net_graph_msg_handler.handle_htlc_fail_channel_update(&msgs::HTLCFailChannelUpdate::ChannelClosed { short_channel_id : as_chan.get_short_channel_id().unwrap(), is_permanent: false } );
- let as_bitcoin_key = as_chan.get_keys().inner.holder_channel_pubkeys.funding_pubkey;
- let bs_bitcoin_key = bs_chan.get_keys().inner.holder_channel_pubkeys.funding_pubkey;
+ let as_bitcoin_key = as_chan.get_signer().inner.holder_channel_pubkeys.funding_pubkey;
+ let bs_bitcoin_key = bs_chan.get_signer().inner.holder_channel_pubkeys.funding_pubkey;
let as_network_key = nodes[0].node.get_our_node_id();
let bs_network_key = nodes[1].node.get_our_node_id();
macro_rules! sign_msg {
($unsigned_msg: expr) => {
let msghash = Message::from_slice(&Sha256dHash::hash(&$unsigned_msg.encode()[..])[..]).unwrap();
- let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_keys().inner.funding_key);
- let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_keys().inner.funding_key);
+ let as_bitcoin_sig = secp_ctx.sign(&msghash, &as_chan.get_signer().inner.funding_key);
+ let bs_bitcoin_sig = secp_ctx.sign(&msghash, &bs_chan.get_signer().inner.funding_key);
let as_node_sig = secp_ctx.sign(&msghash, &nodes[0].keys_manager.get_node_secret());
let bs_node_sig = secp_ctx.sign(&msghash, &nodes[1].keys_manager.get_node_secret());
chan_announcement = msgs::ChannelAnnouncement {
let fee_estimator: test_utils::TestFeeEstimator;
let persister: test_utils::TestPersister;
let new_chain_monitor: test_utils::TestChainMonitor;
- let keys_manager: test_utils::TestKeysInterface;
- let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 100000, 10001, InitFeatures::known(), InitFeatures::known());
let nodes_0_serialized = nodes[0].node.encode();
let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.serialize_for_disk(&mut chan_0_monitor_serialized).unwrap();
+ nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
logger = test_utils::TestLogger::new();
fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
persister = test_utils::TestPersister::new();
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister);
+ let keys_manager = &chanmon_cfgs[0].keys_manager;
+ new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
nodes[0].chain_monitor = &new_chain_monitor;
let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read).unwrap();
+ let (_, mut chan_0_monitor) = <(Option<BlockHash>, ChannelMonitor<EnforcingSigner>)>::read(
+ &mut chan_0_monitor_read, keys_manager).unwrap();
assert!(chan_0_monitor_read.is_empty());
let mut nodes_0_read = &nodes_0_serialized[..];
let config = UserConfig::default();
- keys_manager = test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet);
let (_, nodes_0_deserialized_tmp) = {
let mut channel_monitors = HashMap::new();
channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ <(Option<BlockHash>, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
default_config: config,
- keys_manager: &keys_manager,
+ keys_manager,
fee_estimator: &fee_estimator,
chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster.clone(),
let persister: test_utils::TestPersister;
let logger: test_utils::TestLogger;
let new_chain_monitor: test_utils::TestChainMonitor;
- let keys_manager: test_utils::TestKeysInterface;
- let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// Start creating a channel, but stop right before broadcasting the event message FundingBroadcastSafe
let push_msat = 10001;
let a_flags = InitFeatures::known();
let b_flags = InitFeatures::known();
- let node_a = nodes.pop().unwrap();
- let node_b = nodes.pop().unwrap();
+ let node_a = nodes.remove(0);
+ let node_b = nodes.remove(0);
node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None).unwrap();
node_b.node.handle_open_channel(&node_a.node.get_our_node_id(), a_flags, &get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id()));
node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), b_flags, &get_event_msg!(node_b, MessageSendEvent::SendAcceptChannel, node_a.node.get_our_node_id()));
// Start the de/seriailization process mid-channel creation to check that the channel manager will hold onto events that are serialized
let nodes_0_serialized = nodes[0].node.encode();
let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.serialize_for_disk(&mut chan_0_monitor_serialized).unwrap();
+ nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
logger = test_utils::TestLogger::new();
persister = test_utils::TestPersister::new();
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister);
+ let keys_manager = &chanmon_cfgs[0].keys_manager;
+ new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
nodes[0].chain_monitor = &new_chain_monitor;
let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read).unwrap();
+ let (_, mut chan_0_monitor) = <(Option<BlockHash>, ChannelMonitor<EnforcingSigner>)>::read(
+ &mut chan_0_monitor_read, keys_manager).unwrap();
assert!(chan_0_monitor_read.is_empty());
let mut nodes_0_read = &nodes_0_serialized[..];
let config = UserConfig::default();
- keys_manager = test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet);
let (_, nodes_0_deserialized_tmp) = {
let mut channel_monitors = HashMap::new();
channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ <(Option<BlockHash>, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
default_config: config,
- keys_manager: &keys_manager,
+ keys_manager,
fee_estimator: &fee_estimator,
chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster.clone(),
let fee_estimator: test_utils::TestFeeEstimator;
let persister: test_utils::TestPersister;
let new_chain_monitor: test_utils::TestChainMonitor;
- let keys_manager: test_utils::TestKeysInterface;
- let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
let nodes_0_serialized = nodes[0].node.encode();
let mut chan_0_monitor_serialized = test_utils::TestVecWriter(Vec::new());
- nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.serialize_for_disk(&mut chan_0_monitor_serialized).unwrap();
+ nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut chan_0_monitor_serialized).unwrap();
logger = test_utils::TestLogger::new();
fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
persister = test_utils::TestPersister::new();
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister);
+ let keys_manager = &chanmon_cfgs[0].keys_manager;
+ new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
nodes[0].chain_monitor = &new_chain_monitor;
let mut chan_0_monitor_read = &chan_0_monitor_serialized.0[..];
- let (_, mut chan_0_monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut chan_0_monitor_read).unwrap();
+ let (_, mut chan_0_monitor) = <(Option<BlockHash>, ChannelMonitor<EnforcingSigner>)>::read(
+ &mut chan_0_monitor_read, keys_manager).unwrap();
assert!(chan_0_monitor_read.is_empty());
let mut nodes_0_read = &nodes_0_serialized[..];
- keys_manager = test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet);
let (_, nodes_0_deserialized_tmp) = {
let mut channel_monitors = HashMap::new();
channel_monitors.insert(chan_0_monitor.get_funding_txo().0, &mut chan_0_monitor);
- <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ <(Option<BlockHash>, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
default_config: UserConfig::default(),
- keys_manager: &keys_manager,
+ keys_manager,
fee_estimator: &fee_estimator,
chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster.clone(),
let fee_estimator: test_utils::TestFeeEstimator;
let persister: test_utils::TestPersister;
let new_chain_monitor: test_utils::TestChainMonitor;
- let keys_manager: test_utils::TestKeysInterface;
- let nodes_0_deserialized: ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+ let nodes_0_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
create_announced_chan_between_nodes(&nodes, 2, 0, InitFeatures::known(), InitFeatures::known());
let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 3, InitFeatures::known(), InitFeatures::known());
let mut node_0_stale_monitors_serialized = Vec::new();
- for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() {
+ for monitor in nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter() {
let mut writer = test_utils::TestVecWriter(Vec::new());
- monitor.1.serialize_for_disk(&mut writer).unwrap();
+ monitor.1.write(&mut writer).unwrap();
node_0_stale_monitors_serialized.push(writer.0);
}
// Now the ChannelMonitor (which is now out-of-sync with ChannelManager for channel w/
// nodes[3])
let mut node_0_monitors_serialized = Vec::new();
- for monitor in nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter() {
+ for monitor in nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter() {
let mut writer = test_utils::TestVecWriter(Vec::new());
- monitor.1.serialize_for_disk(&mut writer).unwrap();
+ monitor.1.write(&mut writer).unwrap();
node_0_monitors_serialized.push(writer.0);
}
logger = test_utils::TestLogger::new();
fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
persister = test_utils::TestPersister::new();
- new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister);
+ let keys_manager = &chanmon_cfgs[0].keys_manager;
+ new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[0].chain_source), nodes[0].tx_broadcaster.clone(), &logger, &fee_estimator, &persister, keys_manager);
nodes[0].chain_monitor = &new_chain_monitor;
+
let mut node_0_stale_monitors = Vec::new();
for serialized in node_0_stale_monitors_serialized.iter() {
let mut read = &serialized[..];
- let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut read).unwrap();
+ let (_, monitor) = <(Option<BlockHash>, ChannelMonitor<EnforcingSigner>)>::read(&mut read, keys_manager).unwrap();
assert!(read.is_empty());
node_0_stale_monitors.push(monitor);
}
let mut node_0_monitors = Vec::new();
for serialized in node_0_monitors_serialized.iter() {
let mut read = &serialized[..];
- let (_, monitor) = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut read).unwrap();
+ let (_, monitor) = <(Option<BlockHash>, ChannelMonitor<EnforcingSigner>)>::read(&mut read, keys_manager).unwrap();
assert!(read.is_empty());
node_0_monitors.push(monitor);
}
- keys_manager = test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet);
-
let mut nodes_0_read = &nodes_0_serialized[..];
if let Err(msgs::DecodeError::InvalidValue) =
- <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ <(Option<BlockHash>, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
default_config: UserConfig::default(),
- keys_manager: &keys_manager,
+ keys_manager,
fee_estimator: &fee_estimator,
chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster.clone(),
let mut nodes_0_read = &nodes_0_serialized[..];
let (_, nodes_0_deserialized_tmp) =
- <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
+ <(Option<BlockHash>, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut nodes_0_read, ChannelManagerReadArgs {
default_config: UserConfig::default(),
- keys_manager: &keys_manager,
+ keys_manager,
fee_estimator: &fee_estimator,
chain_monitor: nodes[0].chain_monitor,
tx_broadcaster: nodes[0].tx_broadcaster.clone(),
macro_rules! check_spendable_outputs {
($node: expr, $der_idx: expr, $keysinterface: expr, $chan_value: expr) => {
{
- let events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
+ let mut events = $node.chain_monitor.chain_monitor.get_and_clear_pending_events();
let mut txn = Vec::new();
- for event in events {
+ let mut all_outputs = Vec::new();
+ let secp_ctx = Secp256k1::new();
+ for event in events.drain(..) {
match event {
- Event::SpendableOutputs { ref outputs } => {
- for outp in outputs {
- match *outp {
- SpendableOutputDescriptor::StaticOutputCounterpartyPayment { ref outpoint, ref output, ref key_derivation_params } => {
- let input = TxIn {
- previous_output: outpoint.into_bitcoin_outpoint(),
- script_sig: Script::new(),
- sequence: 0,
- witness: Vec::new(),
- };
- let outp = TxOut {
- script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(),
- value: output.value,
- };
- let mut spend_tx = Transaction {
- version: 2,
- lock_time: 0,
- input: vec![input],
- output: vec![outp],
- };
- spend_tx.output[0].value -= (spend_tx.get_weight() + 2 + 1 + 73 + 35 + 3) as u64 / 4; // (Max weight + 3 (to round up)) / 4
- let secp_ctx = Secp256k1::new();
- let keys = $keysinterface.derive_channel_keys($chan_value, key_derivation_params.0, key_derivation_params.1);
- let remotepubkey = keys.pubkeys().payment_point;
- let witness_script = Address::p2pkh(&::bitcoin::PublicKey{compressed: true, key: remotepubkey}, Network::Testnet).script_pubkey();
- let sighash = Message::from_slice(&bip143::SigHashCache::new(&spend_tx).signature_hash(0, &witness_script, output.value, SigHashType::All)[..]).unwrap();
- let remotesig = secp_ctx.sign(&sighash, &keys.inner.payment_key);
- spend_tx.input[0].witness.push(remotesig.serialize_der().to_vec());
- spend_tx.input[0].witness[0].push(SigHashType::All as u8);
- spend_tx.input[0].witness.push(remotepubkey.serialize().to_vec());
- txn.push(spend_tx);
- },
- SpendableOutputDescriptor::DynamicOutputP2WSH { ref outpoint, ref per_commitment_point, ref to_self_delay, ref output, ref key_derivation_params, ref revocation_pubkey } => {
- let input = TxIn {
- previous_output: outpoint.into_bitcoin_outpoint(),
- script_sig: Script::new(),
- sequence: *to_self_delay as u32,
- witness: Vec::new(),
- };
- let outp = TxOut {
- script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(),
- value: output.value,
- };
- let mut spend_tx = Transaction {
- version: 2,
- lock_time: 0,
- input: vec![input],
- output: vec![outp],
- };
- let secp_ctx = Secp256k1::new();
- let keys = $keysinterface.derive_channel_keys($chan_value, key_derivation_params.0, key_derivation_params.1);
- if let Ok(delayed_payment_key) = chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &keys.inner.delayed_payment_base_key) {
-
- let delayed_payment_pubkey = PublicKey::from_secret_key(&secp_ctx, &delayed_payment_key);
- let witness_script = chan_utils::get_revokeable_redeemscript(revocation_pubkey, *to_self_delay, &delayed_payment_pubkey);
- spend_tx.output[0].value -= (spend_tx.get_weight() + 2 + 1 + 73 + 1 + witness_script.len() + 1 + 3) as u64 / 4; // (Max weight + 3 (to round up)) / 4
- let sighash = Message::from_slice(&bip143::SigHashCache::new(&spend_tx).signature_hash(0, &witness_script, output.value, SigHashType::All)[..]).unwrap();
- let local_delayedsig = secp_ctx.sign(&sighash, &delayed_payment_key);
- spend_tx.input[0].witness.push(local_delayedsig.serialize_der().to_vec());
- spend_tx.input[0].witness[0].push(SigHashType::All as u8);
- spend_tx.input[0].witness.push(vec!()); //MINIMALIF
- spend_tx.input[0].witness.push(witness_script.clone().into_bytes());
- } else { panic!() }
- txn.push(spend_tx);
- },
- SpendableOutputDescriptor::StaticOutput { ref outpoint, ref output } => {
- let secp_ctx = Secp256k1::new();
- let input = TxIn {
- previous_output: outpoint.into_bitcoin_outpoint(),
- script_sig: Script::new(),
- sequence: 0,
- witness: Vec::new(),
- };
- let outp = TxOut {
- script_pubkey: Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(),
- value: output.value,
- };
- let mut spend_tx = Transaction {
- version: 2,
- lock_time: 0,
- input: vec![input],
- output: vec![outp.clone()],
- };
- spend_tx.output[0].value -= (spend_tx.get_weight() + 2 + 1 + 73 + 35 + 3) as u64 / 4; // (Max weight + 3 (to round up)) / 4
- let secret = {
- match ExtendedPrivKey::new_master(Network::Testnet, &$node.node_seed) {
- Ok(master_key) => {
- match master_key.ckd_priv(&secp_ctx, ChildNumber::from_hardened_idx($der_idx).expect("key space exhausted")) {
- Ok(key) => key,
- Err(_) => panic!("Your RNG is busted"),
- }
- }
- Err(_) => panic!("Your rng is busted"),
- }
- };
- let pubkey = ExtendedPubKey::from_private(&secp_ctx, &secret).public_key;
- let witness_script = Address::p2pkh(&pubkey, Network::Testnet).script_pubkey();
- let sighash = Message::from_slice(&bip143::SigHashCache::new(&spend_tx).signature_hash(0, &witness_script, output.value, SigHashType::All)[..]).unwrap();
- let sig = secp_ctx.sign(&sighash, &secret.private_key.key);
- spend_tx.input[0].witness.push(sig.serialize_der().to_vec());
- spend_tx.input[0].witness[0].push(SigHashType::All as u8);
- spend_tx.input[0].witness.push(pubkey.key.serialize().to_vec());
- txn.push(spend_tx);
- },
- }
+ Event::SpendableOutputs { mut outputs } => {
+ for outp in outputs.drain(..) {
+ txn.push($keysinterface.backing.spend_spendable_outputs(&[&outp], Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx).unwrap());
+ all_outputs.push(outp);
}
},
_ => panic!("Unexpected event"),
};
}
+ if all_outputs.len() > 1 {
+ if let Ok(tx) = $keysinterface.backing.spend_spendable_outputs(&all_outputs.iter().map(|a| a).collect::<Vec<_>>(), Vec::new(), Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script(), 253, &secp_ctx) {
+ txn.push(tx);
+ }
+ }
txn
}
}
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
- nodes[1].node.force_close_channel(&chan.2);
+ nodes[1].node.force_close_channel(&chan.2).unwrap();
check_closed_broadcast!(nodes[1], false);
check_added_monitors!(nodes[1], 1);
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 99000000, InitFeatures::known(), InitFeatures::known());
- nodes[0].node.force_close_channel(&chan.2);
+ nodes[0].node.force_close_channel(&chan.2).unwrap();
check_closed_broadcast!(nodes[0], false);
check_added_monitors!(nodes[0], 1);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1, 1, true, header.block_hash());
let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
- assert_eq!(spend_txn.len(), 2);
+ assert_eq!(spend_txn.len(), 3);
check_spends!(spend_txn[0], revoked_local_txn[0]); // to_remote output on revoked remote commitment_tx
check_spends!(spend_txn[1], node_txn[0]);
+ check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[0]); // Both outputs
}
#[test]
expect_payment_failed!(nodes[1], our_payment_hash, true);
let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
- assert_eq!(spend_txn.len(), 2); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output
+ assert_eq!(spend_txn.len(), 3); // SpendableOutput: remote_commitment_tx.to_remote, timeout_tx.output, and one tx spending both
+ check_spends!(spend_txn[0], commitment_tx[0]);
check_spends!(spend_txn[1], node_txn[0]);
+ check_spends!(spend_txn[2], node_txn[0], commitment_tx[0]); // All outputs
}
#[test]
#[test]
fn test_static_spendable_outputs_justice_tx_revoked_htlc_timeout_tx() {
- let chanmon_cfgs = create_chanmon_cfgs(2);
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
#[test]
fn test_static_spendable_outputs_justice_tx_revoked_htlc_success_tx() {
- let chanmon_cfgs = create_chanmon_cfgs(2);
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// Check A's ChannelMonitor was able to generate the right spendable output descriptor
let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 100000);
- assert_eq!(spend_txn.len(), 2);
+ assert_eq!(spend_txn.len(), 3);
assert_eq!(spend_txn[0].input.len(), 1);
check_spends!(spend_txn[0], revoked_local_txn[0]); // spending to_remote output from revoked local tx
assert_ne!(spend_txn[0].input[0].previous_output, revoked_htlc_txn[0].input[0].previous_output);
check_spends!(spend_txn[1], node_txn[1]); // spending justice tx output on the htlc success tx
+ check_spends!(spend_txn[2], revoked_local_txn[0], node_txn[1]); // Both outputs
}
#[test]
let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9000000).0;
let local_txn = get_local_commitment_txn!(nodes[1], chan_1.2);
+ assert_eq!(local_txn.len(), 1);
assert_eq!(local_txn[0].input.len(), 1);
check_spends!(local_txn[0], chan_1.3);
}
let node_txn = {
let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ assert_eq!(node_txn.len(), 3);
+ assert_eq!(node_txn[0], node_txn[2]);
+ assert_eq!(node_txn[1], local_txn[0]);
assert_eq!(node_txn[0].input.len(), 1);
assert_eq!(node_txn[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
check_spends!(node_txn[0], local_txn[0]);
- vec![node_txn[0].clone(), node_txn[2].clone()]
+ vec![node_txn[0].clone()]
};
let header_201 = BlockHeader { version: 0x20000000, prev_blockhash: header.block_hash(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 };
// Verify that B is able to spend its own HTLC-Success tx thanks to spendable output event given back by its ChannelMonitor
let spend_txn = check_spendable_outputs!(nodes[1], 1, node_cfgs[1].keys_manager, 100000);
- assert_eq!(spend_txn.len(), 2);
+ assert_eq!(spend_txn.len(), 1);
check_spends!(spend_txn[0], node_txn[0]);
- check_spends!(spend_txn[1], node_txn[1]);
}
fn do_test_fail_backwards_unrevoked_remote_announce(deliver_last_raa: bool, announce_latest: bool) {
// Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 100000);
- assert_eq!(spend_txn.len(), 2);
+ assert_eq!(spend_txn.len(), 3);
check_spends!(spend_txn[0], local_txn[0]);
check_spends!(spend_txn[1], htlc_timeout);
+ check_spends!(spend_txn[2], local_txn[0], htlc_timeout);
}
#[test]
// We manually create the node configuration to backup the seed.
let seed = [42; 32];
let keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
- let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister);
- let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager, node_seed: seed };
+ let chain_monitor = test_utils::TestChainMonitor::new(Some(&chanmon_cfgs[0].chain_source), &chanmon_cfgs[0].tx_broadcaster, &chanmon_cfgs[0].logger, &chanmon_cfgs[0].fee_estimator, &chanmon_cfgs[0].persister, &keys_manager);
+ let node = NodeCfg { chain_source: &chanmon_cfgs[0].chain_source, logger: &chanmon_cfgs[0].logger, tx_broadcaster: &chanmon_cfgs[0].tx_broadcaster, fee_estimator: &chanmon_cfgs[0].fee_estimator, chain_monitor, keys_manager: &keys_manager, node_seed: seed };
let mut node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
node_cfgs.remove(0);
node_cfgs.insert(0, node);
// Verify that A is able to spend its own HTLC-Timeout tx thanks to spendable output event given back by its ChannelMonitor
let new_keys_manager = test_utils::TestKeysInterface::new(&seed, Network::Testnet);
let spend_txn = check_spendable_outputs!(nodes[0], 1, new_keys_manager, 100000);
- assert_eq!(spend_txn.len(), 2);
+ assert_eq!(spend_txn.len(), 3);
check_spends!(spend_txn[0], local_txn_1[0]);
check_spends!(spend_txn[1], htlc_timeout);
+ check_spends!(spend_txn[2], local_txn_1[0], htlc_timeout);
}
#[test]
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 0, InitFeatures::known(), InitFeatures::known());
+ let _chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 0, InitFeatures::known(), InitFeatures::known());
let logger = test_utils::TestLogger::new();
let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
send_payment(&nodes[0], &vec!(&nodes[1])[..], max_in_flight, max_in_flight);
let (_, our_payment_hash) = get_payment_preimage_hash!(nodes[0]);
- let net_graph_msg_handler = &nodes[0].net_graph_msg_handler;
- let logger = test_utils::TestLogger::new();
- let route = get_route(&nodes[0].node.get_our_node_id(), &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[1].node.get_our_node_id(), None, &[], max_in_flight+1, TEST_FINAL_CLTV, &logger).unwrap();
+ // Manually create a route over our max in flight (which our router normally automatically
+ // limits us to).
+ let route = Route { paths: vec![vec![RouteHop {
+ pubkey: nodes[1].node.get_our_node_id(), node_features: NodeFeatures::known(), channel_features: ChannelFeatures::known(),
+ short_channel_id: nodes[1].node.list_usable_channels()[0].short_channel_id.unwrap(),
+ fee_msat: max_in_flight + 1, cltv_expiry_delta: TEST_FINAL_CLTV
+ }]] };
unwrap_send_err!(nodes[0].node.send_payment(&route, our_payment_hash, &None), true, APIError::ChannelUnavailable { ref err },
assert!(regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap().is_match(err)));
// We can have at most two valid local commitment tx, so both cases must be covered, and both txs must be checked to get them all as
// HTLC could have been removed from lastest local commitment tx but still valid until we get remote RAA
- let chanmon_cfgs = create_chanmon_cfgs(2);
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let mut node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
// Test we enforce upfront_scriptpbukey if by providing a diffrent one at closing that we disconnect peer
- nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+ nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
assert!(regex::Regex::new(r"Got shutdown request with a scriptpubkey \([A-Fa-f0-9]+\) which did not match their previous scriptpubkey.").unwrap().is_match(check_closed_broadcast!(nodes[2], true).unwrap().data.as_str()));
check_added_monitors!(nodes[2], 1);
nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[2].node.get_our_node_id());
// We test that in case of peer committing upfront to a script, if it oesn't change at closing, we sign
- nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
+ nodes[2].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
let events = nodes[2].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match events[0] {
nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
let mut node_1_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
node_1_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_1_shutdown);
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &node_1_shutdown);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match events[0] {
nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
match events[0] {
nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
node_0_shutdown.scriptpubkey = Builder::new().push_opcode(opcodes::all::OP_RETURN).into_script().to_p2sh();
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_0_shutdown);
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ MessageSendEvent::SendShutdown { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ MessageSendEvent::SendClosingSigned { node_id, .. } => { assert_eq!(node_id, nodes[1].node.get_our_node_id()) }
+ _ => panic!("Unexpected event"),
+ }
+}
+
+#[test]
+fn test_upfront_shutdown_script_unsupport_segwit() {
+ // We test that the channel is rejected early (with an error message, before any channel is
+ // established) if a segwit witness program is provided as the upfront shutdown script
+ // but the peer does not support opt_shutdown_anysegwit.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 42, None).unwrap();
+
+ // Tamper with the outbound open_channel: swap in a v16 witness program (OP_16 followed by a
+ // 2-byte data push) as the upfront shutdown script.
+ let mut open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ open_channel.shutdown_scriptpubkey = Present(Builder::new().push_int(16)
+ .push_slice(&[0, 0])
+ .into_script());
+
+ // Strip opt_shutdown_anysegwit from the peer's features so the script above must be rejected.
+ let features = InitFeatures::known().clear_shutdown_anysegwit();
+ // NOTE(review): the open_channel is handled by nodes[0] against its own node_id, and the
+ // resulting error below is addressed to nodes[0] as well. This looks intentional (it exercises
+ // the reject path without needing the counterparty's state) — but worth confirming.
+ nodes[0].node.handle_open_channel(&nodes[0].node.get_our_node_id(), features, &open_channel);
+
+ // The handler must answer with an error rather than an accept_channel.
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 1);
+ match events[0] {
+ MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+ assert_eq!(node_id, nodes[0].node.get_our_node_id());
+ assert!(regex::Regex::new(r"Peer is signaling upfront_shutdown but has provided a non-accepted scriptpubkey format. script: (\([A-Fa-f0-9]+\))").unwrap().is_match(&*msg.data));
+ },
+ _ => panic!("Unexpected event"),
+ }
+}
+
+#[test]
+fn test_shutdown_script_any_segwit_allowed() {
+ let mut config = UserConfig::default();
+ config.channel_options.announced_channel = true;
+ config.peer_channel_config_limits.force_announced_channel_preference = false;
+ config.channel_options.commit_upfront_shutdown_pubkey = false;
+ let user_cfgs = [None, Some(config), None];
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // We test that, if the remote peer accepts opt_shutdown_anysegwit, a witness program can be used on shutdown
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
+ nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+ let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ node_0_shutdown.scriptpubkey = Builder::new().push_int(16)
+ .push_slice(&[0, 0])
+ .into_script();
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
let events = nodes[0].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 2);
match events[0] {
}
}
+#[test]
+fn test_shutdown_script_any_segwit_not_allowed() {
+ // We test that if the remote peer does not accept opt_shutdown_anysegwit, an arbitrary
+ // segwit-version witness program cannot be used as the shutdown script: the channel is
+ // force-closed with an error instead.
+ let mut config = UserConfig::default();
+ config.channel_options.announced_channel = true;
+ config.peer_channel_config_limits.force_announced_channel_preference = false;
+ config.channel_options.commit_upfront_shutdown_pubkey = false;
+ let user_cfgs = [None, Some(config), None];
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
+ nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+ let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ // Make a v16 witness-program script (OP_16 plus a 2-byte push), serializing to 0x60020000.
+ node_0_shutdown.scriptpubkey = Builder::new().push_int(16)
+ .push_slice(&[0, 0])
+ .into_script();
+ // Handle the shutdown with opt_shutdown_anysegwit explicitly cleared from the peer features.
+ let flags_no = InitFeatures::known().clear_shutdown_anysegwit();
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &flags_no, &node_0_shutdown);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ // events[0] is left unchecked here (presumably our own channel-close broadcast/reply —
+ // TODO confirm); the error message we care about is the second event.
+ match events[1] {
+ MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+ assert_eq!(node_id, nodes[1].node.get_our_node_id());
+ assert_eq!(msg.data, "Got a nonstandard scriptpubkey (60020000) from remote peer".to_owned())
+ },
+ _ => panic!("Unexpected event"),
+ }
+ // The rejection force-closes the channel, producing one monitor update.
+ check_added_monitors!(nodes[0], 1);
+}
+
<br>
+#[test]
+fn test_shutdown_script_segwit_but_not_anysegwit() {
+ // We test that even when opt_shutdown_anysegwit is supported, a segwit v0 script whose
+ // witness program has an invalid length (2 bytes) is rejected as nonstandard and the
+ // channel is force-closed with an error.
+ let mut config = UserConfig::default();
+ config.channel_options.announced_channel = true;
+ config.peer_channel_config_limits.force_announced_channel_preference = false;
+ config.channel_options.commit_upfront_shutdown_pubkey = false;
+ let user_cfgs = [None, Some(config), None];
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &user_cfgs);
+ let nodes = create_network(3, &node_cfgs, &user_cfgs_placeholder);
+
+ let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
+ nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id()).unwrap();
+ let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
+ // Make a segwit v0 script that is not valid as any witness program: OP_0 plus a 2-byte
+ // push, serializing to 0x00020000.
+ node_0_shutdown.scriptpubkey = Builder::new().push_int(0)
+ .push_slice(&[0, 0])
+ .into_script();
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &node_0_shutdown);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ // events[0] is left unchecked here (presumably our own channel-close broadcast/reply —
+ // TODO confirm); the error message we care about is the second event.
+ match events[1] {
+ MessageSendEvent::HandleError { action: ErrorAction::SendErrorMessage { ref msg }, node_id } => {
+ assert_eq!(node_id, nodes[1].node.get_our_node_id());
+ assert_eq!(msg.data, "Got a nonstandard scriptpubkey (00020000) from remote peer".to_owned())
+ },
+ _ => panic!("Unexpected event"),
+ }
+ // The rejection force-closes the channel, producing one monitor update.
+ check_added_monitors!(nodes[0], 1);
+}
+
#[test]
fn test_user_configurable_csv_delay() {
// We test our channel constructors yield errors when we pass them absurd csv delay
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// We test config.our_to_self > BREAKDOWN_TIMEOUT is enforced in Channel::new_outbound()
- let keys_manager: Arc<KeysInterface<ChanKeySigner = EnforcingChannelKeys>> = Arc::new(test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet));
- if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, &low_our_to_self_config) {
+ if let Err(error) = Channel::new_outbound(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), 1000000, 1000000, 0, &low_our_to_self_config) {
match error {
APIError::APIMisuseError { err } => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
_ => panic!("Unexpected event"),
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
open_channel.to_self_delay = 200;
- if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &low_our_to_self_config) {
+ if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &low_our_to_self_config) {
match error {
ChannelError::Close(err) => { assert!(regex::Regex::new(r"Configured with an unreasonable our_to_self_delay \(\d+\) putting user funds at risks").unwrap().is_match(err.as_str())); },
_ => panic!("Unexpected event"),
nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 1000000, 1000000, 42, None).unwrap();
let mut open_channel = get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
open_channel.to_self_delay = 200;
- if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &high_their_to_self_config) {
+ if let Err(error) = Channel::new_from_req(&&test_utils::TestFeeEstimator { sat_per_kw: 253 }, &nodes[0].keys_manager, nodes[1].node.get_our_node_id(), InitFeatures::known(), &open_channel, 0, &high_their_to_self_config) {
match error {
ChannelError::Close(err) => { assert!(regex::Regex::new(r"They wanted our payments to be delayed by a needlessly long period\. Upper limit: \d+\. Actual: \d+").unwrap().is_match(err.as_str())); },
_ => panic!("Unexpected event"),
fn test_data_loss_protect() {
// We want to be sure that :
// * we don't broadcast our Local Commitment Tx in case of fallen behind
+ // (but this is not quite true - we broadcast during Drop because chanmon is out of sync with chanmgr)
// * we close channel in case of detecting other being fallen behind
// * we are able to claim our own outputs thanks to to_remote being static
- let keys_manager;
+ // TODO: this test is incomplete and the data_loss_protect implementation is incomplete - see issue #775
let persister;
let logger;
let fee_estimator;
let tx_broadcaster;
let chain_source;
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ // We broadcast during Drop because chanmon is out of sync with chanmgr, which would cause a panic
+ // during signing due to revoked tx
+ chanmon_cfgs[0].keys_manager.disable_revocation_policy_check = true;
+ let keys_manager = &chanmon_cfgs[0].keys_manager;
let monitor;
let node_state_0;
- let chanmon_cfgs = create_chanmon_cfgs(2);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// Cache node A state before any channel update
let previous_node_state = nodes[0].node.encode();
let mut previous_chain_monitor_state = test_utils::TestVecWriter(Vec::new());
- nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap().iter().next().unwrap().1.serialize_for_disk(&mut previous_chain_monitor_state).unwrap();
+ nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap().iter().next().unwrap().1.write(&mut previous_chain_monitor_state).unwrap();
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000, 8_000_000);
// Restore node A from previous state
logger = test_utils::TestLogger::with_id(format!("node {}", 0));
- let mut chain_monitor = <(BlockHash, ChannelMonitor<EnforcingChannelKeys>)>::read(&mut ::std::io::Cursor::new(previous_chain_monitor_state.0)).unwrap().1;
+ let mut chain_monitor = <(Option<BlockHash>, ChannelMonitor<EnforcingSigner>)>::read(&mut ::std::io::Cursor::new(previous_chain_monitor_state.0), keys_manager).unwrap().1;
chain_source = test_utils::TestChainSource::new(Network::Testnet);
tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new())};
fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: 253 };
- keys_manager = test_utils::TestKeysInterface::new(&nodes[0].node_seed, Network::Testnet);
persister = test_utils::TestPersister::new();
- monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &fee_estimator, &persister);
+ monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &fee_estimator, &persister, keys_manager);
node_state_0 = {
let mut channel_monitors = HashMap::new();
channel_monitors.insert(OutPoint { txid: chan.3.txid(), index: 0 }, &mut chain_monitor);
- <(BlockHash, ChannelManager<EnforcingChannelKeys, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
- keys_manager: &keys_manager,
+ <(Option<BlockHash>, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut ::std::io::Cursor::new(previous_node_state), ChannelManagerReadArgs {
+ keys_manager: keys_manager,
fee_estimator: &fee_estimator,
chain_monitor: &monitor,
logger: &logger,
let header = BlockHeader { version: 0x20000000, prev_blockhash: Default::default(), merkle_root: Default::default(), time: 42, bits: 42, nonce: 42};
connect_block(&nodes[0], &Block { header, txdata: vec![node_txn[0].clone()]}, 0);
connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1, 0, true, header.block_hash());
- let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 100000);
+ let spend_txn = check_spendable_outputs!(nodes[0], 1, node_cfgs[0].keys_manager, 1000000);
assert_eq!(spend_txn.len(), 1);
check_spends!(spend_txn[0], node_txn[0]);
}
// In case of penalty txn with too low feerates for getting into mempools, RBF-bump them to sure
// we're able to claim outputs on revoked HTLC transactions before timelocks expiration
- let chanmon_cfgs = create_chanmon_cfgs(2);
+ let mut chanmon_cfgs = create_chanmon_cfgs(2);
+ chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true;
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
// commitment transaction, we would have happily carried on and provided them the next
// commitment transaction based on one RAA forward. This would probably eventually have led to
// channel closure, but it would not have resulted in funds loss. Still, our
- // EnforcingChannelKeys would have paniced as it doesn't like jumps into the future. Here, we
+ // EnforcingSigner would have panicked as it doesn't like jumps into the future. Here, we
// check simply that the channel is closed in response to such an RAA, but don't check whether
// we decide to punish our counterparty for revoking their funds (as we don't currently
// implement that).
let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
let mut guard = nodes[0].node.channel_state.lock().unwrap();
- let keys = &guard.by_id.get_mut(&channel_id).unwrap().holder_keys;
+ let keys = &guard.by_id.get_mut(&channel_id).unwrap().get_signer();
const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
+ let per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
+ // Must revoke without gaps
+ keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
let next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
&SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
- let per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
&msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
connect_block(&nodes[0], &Block { header: header_130, txdata: penalty_txn }, 130);
connect_blocks(&nodes[0], 5, 130, false, header_130.block_hash());
{
- let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
+ let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
if let Some(monitor) = monitors.get(&OutPoint { txid: chan.3.txid(), index: 0 }) {
- assert!(monitor.onchain_tx_handler.pending_claim_requests.is_empty());
- assert!(monitor.onchain_tx_handler.claimable_outpoints.is_empty());
+ assert!(monitor.inner.lock().unwrap().onchain_tx_handler.pending_claim_requests.is_empty());
+ assert!(monitor.inner.lock().unwrap().onchain_tx_handler.claimable_outpoints.is_empty());
}
}
}
let logger = test_utils::TestLogger::with_id(format!("node {}", 0));
let persister = test_utils::TestPersister::new();
let watchtower = {
- let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
+ let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
let monitor = monitors.get(&outpoint).unwrap();
let mut w = test_utils::TestVecWriter(Vec::new());
- monitor.serialize_for_disk(&mut w).unwrap();
- let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
- &mut ::std::io::Cursor::new(&w.0)).unwrap().1;
+ monitor.write(&mut w).unwrap();
+ let new_monitor = <(Option<BlockHash>, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+ &mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
assert!(new_monitor == *monitor);
- let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister);
+ let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
watchtower
};
let logger = test_utils::TestLogger::with_id(format!("node {}", "Alice"));
let persister = test_utils::TestPersister::new();
let watchtower_alice = {
- let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
+ let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
let monitor = monitors.get(&outpoint).unwrap();
let mut w = test_utils::TestVecWriter(Vec::new());
- monitor.serialize_for_disk(&mut w).unwrap();
- let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
- &mut ::std::io::Cursor::new(&w.0)).unwrap().1;
+ monitor.write(&mut w).unwrap();
+ let new_monitor = <(Option<BlockHash>, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+ &mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
assert!(new_monitor == *monitor);
- let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister);
+ let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
watchtower
};
let logger = test_utils::TestLogger::with_id(format!("node {}", "Bob"));
let persister = test_utils::TestPersister::new();
let watchtower_bob = {
- let monitors = nodes[0].chain_monitor.chain_monitor.monitors.lock().unwrap();
+ let monitors = nodes[0].chain_monitor.chain_monitor.monitors.read().unwrap();
let monitor = monitors.get(&outpoint).unwrap();
let mut w = test_utils::TestVecWriter(Vec::new());
- monitor.serialize_for_disk(&mut w).unwrap();
- let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<EnforcingChannelKeys>)>::read(
- &mut ::std::io::Cursor::new(&w.0)).unwrap().1;
+ monitor.write(&mut w).unwrap();
+ let new_monitor = <(Option<BlockHash>, channelmonitor::ChannelMonitor<EnforcingSigner>)>::read(
+ &mut ::std::io::Cursor::new(&w.0), &test_utils::OnlyReadsKeysInterface {}).unwrap().1;
assert!(new_monitor == *monitor);
- let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister);
+ let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
assert!(watchtower.watch_channel(outpoint, new_monitor).is_ok());
watchtower
};
// responds by (1) broadcasting a channel update and (2) adding a new ChannelMonitor.
let mut force_closing_node = 0; // Alice force-closes
if !broadcast_alice { force_closing_node = 1; } // Bob force-closes
- nodes[force_closing_node].node.force_close_channel(&chan_ab.2);
+ nodes[force_closing_node].node.force_close_channel(&chan_ab.2).unwrap();
check_closed_broadcast!(nodes[force_closing_node], false);
check_added_monitors!(nodes[force_closing_node], 1);
if go_onchain_before_fulfill {
update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &as_update, &bs_update);
send_payment(&nodes[0], &[&nodes[1]], 8000000, 8_000_000);
}
+
+#[test]
+fn test_error_chans_closed() {
+ // Test that we properly handle error messages, closing appropriate channels.
+ //
+ // Prior to #787 we'd allow a peer to make us force-close a channel we had with a different
+ // peer. The "real" fix for that is to index channels with peers_ids, however in the mean time
+ // we can test various edge cases around it to ensure we don't regress.
+ let chanmon_cfgs = create_chanmon_cfgs(3);
+ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
+ let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
+
+ // Create some initial channels: chan_1 and chan_2 with nodes[1], chan_3 with nodes[2].
+ let chan_1 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+ let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+ let chan_3 = create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
+ assert_eq!(nodes[1].node.list_usable_channels().len(), 2);
+ assert_eq!(nodes[2].node.list_usable_channels().len(), 1);
+
+ // Closing a channel from a different peer has no effect: nodes[1] names chan_3, which we
+ // have with nodes[2], so nothing should be closed.
+ nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_3.2, data: "ERR".to_owned() });
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 3);
+
+ // Closing one channel doesn't impact others
+ nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: chan_2.2, data: "ERR".to_owned() });
+ check_added_monitors!(nodes[0], 1);
+ check_closed_broadcast!(nodes[0], false);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 2);
+ assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_1.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_1.2);
+ assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2 || nodes[0].node.list_usable_channels()[1].channel_id == chan_3.2);
+
+ // A null channel ID should close all channels with that peer: both chan_1 and _chan_4 (the
+ // two remaining channels with nodes[1]) get force-closed, hence 2 monitor updates below.
+ let _chan_4 = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 10001, InitFeatures::known(), InitFeatures::known());
+ nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id: [0; 32], data: "ERR".to_owned() });
+ check_added_monitors!(nodes[0], 2);
+ let events = nodes[0].node.get_and_clear_pending_msg_events();
+ assert_eq!(events.len(), 2);
+ match events[0] {
+ MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+ // flags & 2 is the channel-disabled bit of channel_update — presumably checking the
+ // update advertises the channel as disabled; confirm against the flags definition.
+ assert_eq!(msg.contents.flags & 2, 2);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ match events[1] {
+ MessageSendEvent::BroadcastChannelUpdate { ref msg } => {
+ assert_eq!(msg.contents.flags & 2, 2);
+ },
+ _ => panic!("Unexpected event"),
+ }
+ // Note that at this point users of a standard PeerHandler will end up calling
+ // peer_disconnected with no_connection_possible set to false, duplicating the
+ // close-all-channels logic. That's OK, we don't want to end up not force-closing channels for
+ // users with their own peer handling logic. We duplicate the call here, however.
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+ assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
+
+ // Only chan_3 (with nodes[2]) must survive a disconnect from nodes[1] as well.
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), true);
+ assert_eq!(nodes[0].node.list_usable_channels().len(), 1);
+ assert!(nodes[0].node.list_usable_channels()[0].channel_id == chan_3.2);
+}
use ln::msgs;
use util::ser::{Writeable, Readable, Writer};
use util::logger::Logger;
-use util::events;
+use util::events::{MessageSendEvent, MessageSendEventsProvider};
use std::{cmp, fmt};
use std::sync::{RwLock, RwLockReadGuard};
use std::ops::Deref;
use bitcoin::hashes::hex::ToHex;
+/// The maximum number of extra bytes which we do not understand in a gossip message before we will
+/// refuse to relay the message.
+const MAX_EXCESS_BYTES_FOR_RELAY: usize = 1024;
+
/// Represents the network as nodes and channels between them
-#[derive(PartialEq)]
+#[derive(Clone, PartialEq)]
pub struct NetworkGraph {
genesis_hash: BlockHash,
channels: BTreeMap<u64, ChannelInfo>,
pub network_graph: RwLock<NetworkGraph>,
chain_access: Option<C>,
full_syncs_requested: AtomicUsize,
- pending_events: Mutex<Vec<events::MessageSendEvent>>,
+ pending_events: Mutex<Vec<MessageSendEvent>>,
logger: L,
}
}
}
+ /// Adds a provider used to check new announcements. Does not affect
+ /// existing announcements unless they are updated.
+ /// Adding, updating or removing the provider replaces the current one.
+ pub fn add_chain_access(&mut self, chain_access: Option<C>) {
+ self.chain_access = chain_access;
+ }
+
/// Take a read lock on the network_graph and return it in the C-bindings
/// newtype helper. This is likely only useful when called via the C
/// bindings as you can call `self.network_graph.read().unwrap()` in Rust
impl<C: Deref + Sync + Send, L: Deref + Sync + Send> RoutingMessageHandler for NetGraphMsgHandler<C, L> where C::Target: chain::Access, L::Target: Logger {
fn handle_node_announcement(&self, msg: &msgs::NodeAnnouncement) -> Result<bool, LightningError> {
self.network_graph.write().unwrap().update_node_from_announcement(msg, &self.secp_ctx)?;
- Ok(msg.contents.excess_data.is_empty() && msg.contents.excess_address_data.is_empty())
+ Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
+ msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
+ msg.contents.excess_data.len() + msg.contents.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
}
fn handle_channel_announcement(&self, msg: &msgs::ChannelAnnouncement) -> Result<bool, LightningError> {
self.network_graph.write().unwrap().update_channel_from_announcement(msg, &self.chain_access, &self.secp_ctx)?;
log_trace!(self.logger, "Added channel_announcement for {}{}", msg.contents.short_channel_id, if !msg.contents.excess_data.is_empty() { " with excess uninterpreted data!" } else { "" });
- Ok(msg.contents.excess_data.is_empty())
+ Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
}
fn handle_htlc_fail_channel_update(&self, update: &msgs::HTLCFailChannelUpdate) {
fn handle_channel_update(&self, msg: &msgs::ChannelUpdate) -> Result<bool, LightningError> {
self.network_graph.write().unwrap().update_channel(msg, &self.secp_ctx)?;
- Ok(msg.contents.excess_data.is_empty())
+ Ok(msg.contents.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY)
}
fn get_next_channel_announcements(&self, starting_point: u64, batch_amount: u8) -> Vec<(ChannelAnnouncement, Option<ChannelUpdate>, Option<ChannelUpdate>)> {
let number_of_blocks = 0xffffffff;
log_debug!(self.logger, "Sending query_channel_range peer={}, first_blocknum={}, number_of_blocks={}", log_pubkey!(their_node_id), first_blocknum, number_of_blocks);
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::MessageSendEvent::SendChannelRangeQuery {
+ pending_events.push(MessageSendEvent::SendChannelRangeQuery {
node_id: their_node_id.clone(),
msg: QueryChannelRange {
chain_hash: self.network_graph.read().unwrap().genesis_hash,
/// does not match our chain_hash will be rejected when the announcement is
/// processed.
fn handle_reply_channel_range(&self, their_node_id: &PublicKey, msg: ReplyChannelRange) -> Result<(), LightningError> {
- log_debug!(self.logger, "Handling reply_channel_range peer={}, first_blocknum={}, number_of_blocks={}, full_information={}, scids={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks, msg.full_information, msg.short_channel_ids.len(),);
-
- // Validate that the remote node maintains up-to-date channel
- // information for chain_hash. Some nodes use the full_information
- // flag to indicate multi-part messages so we must check whether
- // we received SCIDs as well.
- if !msg.full_information && msg.short_channel_ids.len() == 0 {
- return Err(LightningError {
- err: String::from("Received reply_channel_range with no information available"),
- action: ErrorAction::IgnoreError,
- });
- }
+ log_debug!(self.logger, "Handling reply_channel_range peer={}, first_blocknum={}, number_of_blocks={}, sync_complete={}, scids={}", log_pubkey!(their_node_id), msg.first_blocknum, msg.number_of_blocks, msg.sync_complete, msg.short_channel_ids.len(),);
log_debug!(self.logger, "Sending query_short_channel_ids peer={}, batch_size={}", log_pubkey!(their_node_id), msg.short_channel_ids.len());
let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::MessageSendEvent::SendShortIdsQuery {
+ pending_events.push(MessageSendEvent::SendShortIdsQuery {
node_id: their_node_id.clone(),
msg: QueryShortChannelIds {
chain_hash: msg.chain_hash,
}
}
-impl<C: Deref, L: Deref> events::MessageSendEventsProvider for NetGraphMsgHandler<C, L>
+impl<C: Deref, L: Deref> MessageSendEventsProvider for NetGraphMsgHandler<C, L>
where
C::Target: chain::Access,
L::Target: Logger,
{
- fn get_and_clear_pending_msg_events(&self) -> Vec<events::MessageSendEvent> {
+ fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let mut ret = Vec::new();
let mut pending_events = self.pending_events.lock().unwrap();
std::mem::swap(&mut ret, &mut pending_events);
}
}
-#[derive(PartialEq, Debug)]
+#[derive(Clone, Debug, PartialEq)]
/// Details about one direction of a channel. Received
/// within a channel update.
pub struct DirectionalChannelInfo {
last_update_message
});
-#[derive(PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
/// Details about a channel (both directions).
/// Received within a channel announcement.
pub struct ChannelInfo {
}
}
-#[derive(PartialEq, Debug)]
+#[derive(Clone, Debug, PartialEq)]
/// Information received in the latest node_announcement from this node.
pub struct NodeAnnouncementInfo {
/// Protocol features the node announced support for
}
}
-#[derive(PartialEq)]
+#[derive(Clone, Debug, PartialEq)]
/// Details about a node in the network, known from the network announcement.
pub struct NodeInfo {
/// All valid channels a node has announced
}
}
- let should_relay = msg.excess_data.is_empty() && msg.excess_address_data.is_empty();
+ let should_relay =
+ msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
+ msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY &&
+ msg.excess_data.len() + msg.excess_address_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY;
node.announcement_info = Some(NodeAnnouncementInfo {
features: msg.features.clone(),
last_update: msg.timestamp,
node_two: msg.node_id_2.clone(),
two_to_one: None,
capacity_sats: utxo_value,
- announcement_message: if msg.excess_data.is_empty() { full_msg.cloned() } else { None },
+ announcement_message: if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
+ { full_msg.cloned() } else { None },
};
match self.channels.entry(msg.short_channel_id) {
chan_was_enabled = false;
}
- let last_update_message = if msg.excess_data.is_empty() { full_msg.cloned() } else { None };
+ let last_update_message = if msg.excess_data.len() <= MAX_EXCESS_BYTES_FOR_RELAY
+ { full_msg.cloned() } else { None };
let updated_channel_dir_info = DirectionalChannelInfo {
enabled: chan_enabled,
mod tests {
use chain;
use ln::features::{ChannelFeatures, InitFeatures, NodeFeatures};
- use routing::network_graph::{NetGraphMsgHandler, NetworkGraph};
+ use routing::network_graph::{NetGraphMsgHandler, NetworkGraph, MAX_EXCESS_BYTES_FOR_RELAY};
use ln::msgs::{Init, OptionalField, RoutingMessageHandler, UnsignedNodeAnnouncement, NodeAnnouncement,
UnsignedChannelAnnouncement, ChannelAnnouncement, UnsignedChannelUpdate, ChannelUpdate, HTLCFailChannelUpdate,
ReplyChannelRange, ReplyShortChannelIdsEnd, QueryChannelRange, QueryShortChannelIds, MAX_VALUE_MSAT};
};
unsigned_announcement.timestamp += 1000;
- unsigned_announcement.excess_data.push(1);
+ unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
let announcement_with_data = NodeAnnouncement {
signature: secp_ctx.sign(&msghash, node_1_privkey),
// Don't relay valid channels with excess data
unsigned_announcement.short_channel_id += 1;
- unsigned_announcement.excess_data.push(1);
+ unsigned_announcement.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
let valid_announcement = ChannelAnnouncement {
node_signature_1: secp_ctx.sign(&msghash, node_1_privkey),
}
unsigned_channel_update.timestamp += 100;
- unsigned_channel_update.excess_data.push(1);
+ unsigned_channel_update.excess_data.resize(MAX_EXCESS_BYTES_FOR_RELAY + 1, 0);
let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
let valid_channel_update = ChannelUpdate {
signature: secp_ctx.sign(&msghash, node_1_privkey),
htlc_maximum_msat: OptionalField::Absent,
fee_base_msat: 10000,
fee_proportional_millionths: 20,
- excess_data: [1; 3].to_vec()
+ excess_data: [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec()
};
let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_channel_update.encode()[..])[..]);
let valid_channel_update = ChannelUpdate {
alias: [0; 32],
addresses: Vec::new(),
excess_address_data: Vec::new(),
- excess_data: [1; 3].to_vec(),
+ excess_data: [1; MAX_EXCESS_BYTES_FOR_RELAY + 1].to_vec(),
};
let msghash = hash_to_message!(&Sha256dHash::hash(&unsigned_announcement.encode()[..])[..]);
let valid_announcement = NodeAnnouncement {
{
let result = net_graph_msg_handler.handle_reply_channel_range(&node_id_1, ReplyChannelRange {
chain_hash,
- full_information: true,
+ sync_complete: true,
first_blocknum: 0,
number_of_blocks: 2000,
short_channel_ids: vec![
_ => panic!("expected MessageSendEvent::SendShortIdsQuery"),
}
}
-
- // Test receipt of a reply that indicates the remote node does not maintain up-to-date
- // information for the chain_hash. Because of discrepancies in implementation we use
- // full_information=false and short_channel_ids=[] as the signal.
- {
- // Handle the reply indicating the peer was unable to fulfill our request.
- let result = net_graph_msg_handler.handle_reply_channel_range(&node_id_1, ReplyChannelRange {
- chain_hash,
- full_information: false,
- first_blocknum: 1000,
- number_of_blocks: 100,
- short_channel_ids: vec![],
- });
- assert!(result.is_err());
- assert_eq!(result.err().unwrap().err, "Received reply_channel_range with no information available");
- }
}
#[test]
use util::logger::Logger;
use std::cmp;
- use std::collections::{HashMap,BinaryHeap};
+ use std::collections::{HashMap, BinaryHeap};
use std::ops::Deref;
/// A hop in a route
/// The channel_announcement features of the channel that should be used from the previous hop
/// to reach this node.
pub channel_features: ChannelFeatures,
- /// The fee taken on this hop. For the last hop, this should be the full value of the payment.
+ /// The fee taken on this hop (for paying for the use of the *next* channel in the path).
+ /// For the last hop, this should be the full value of the payment (might be more than
+ /// requested if we had to match htlc_minimum_msat).
pub fee_msat: u64,
/// The CLTV delta added for this hop. For the last hop, this should be the full CLTV value
/// expected at the destination, in excess of the current block height.
pub cltv_expiry_delta: u32,
}
+/// (C-not exported)
impl Writeable for Vec<RouteHop> {
fn write<W: ::util::ser::Writer>(&self, writer: &mut W) -> Result<(), ::std::io::Error> {
(self.len() as u8).write(writer)?;
}
}
+/// (C-not exported)
impl Readable for Vec<RouteHop> {
fn read<R: ::std::io::Read>(reader: &mut R) -> Result<Vec<RouteHop>, DecodeError> {
let hops_count: u8 = Readable::read(reader)?;
/// The difference in CLTV values between this node and the next node.
pub cltv_expiry_delta: u16,
/// The minimum value, in msat, which must be relayed to the next hop.
- pub htlc_minimum_msat: u64,
+ pub htlc_minimum_msat: Option<u64>,
+ /// The maximum value in msat available for routing with a single HTLC.
+ pub htlc_maximum_msat: Option<u64>,
}
#[derive(Eq, PartialEq)]
pubkey: PublicKey,
lowest_fee_to_peer_through_node: u64,
lowest_fee_to_node: u64,
+ // The maximum value a yet-to-be-constructed payment path might flow through this node.
+ // This value is upper-bounded by us by:
+ // - how much is needed for a path being constructed
+	// - how much value channels following this node (up to the destination) can contribute,
+ // considering their capacity and fees
+ value_contribution_msat: u64
}
impl cmp::Ord for RouteGraphNode {
struct DummyDirectionalChannelInfo {
cltv_expiry_delta: u32,
htlc_minimum_msat: u64,
+ htlc_maximum_msat: Option<u64>,
fees: RoutingFees,
}
+ /// It's useful to keep track of the hops associated with the fees required to use them,
+ /// so that we can choose cheaper paths (as per Dijkstra's algorithm).
+ /// Fee values should be updated only in the context of the whole path, see update_value_and_recompute_fees.
+ /// These fee values are useful to choose hops as we traverse the graph "payee-to-payer".
+ #[derive(Clone)]
+ struct PathBuildingHop {
+ /// Hop-specific details unrelated to the path during the routing phase,
+ /// but rather relevant to the LN graph.
+ route_hop: RouteHop,
+ /// Minimal fees required to route to the source node of the current hop via any of its inbound channels.
+ src_lowest_inbound_fees: RoutingFees,
+ /// Fees of the channel used in this hop.
+ channel_fees: RoutingFees,
+ /// All the fees paid *after* this channel on the way to the destination
+ next_hops_fee_msat: u64,
+ /// Fee paid for the use of the current channel (see channel_fees).
+ /// The value will be actually deducted from the counterparty balance on the previous link.
+ hop_use_fee_msat: u64,
+		/// Used to compare channels when choosing one for routing.
+ /// Includes paying for the use of a hop and the following hops, as well as
+ /// an estimated cost of reaching this hop.
+ /// Might get stale when fees are recomputed. Primarily for internal use.
+ total_fee_msat: u64,
+ /// This is useful for update_value_and_recompute_fees to make sure
+ /// we don't fall below the minimum. Should not be updated manually and
+ /// generally should not be accessed.
+ htlc_minimum_msat: u64,
+ }
+
+ // Instantiated with a list of hops with correct data in them collected during path finding,
+ // an instance of this struct should be further modified only via given methods.
+ #[derive(Clone)]
+ struct PaymentPath {
+ hops: Vec<PathBuildingHop>,
+ }
+
+ impl PaymentPath {
+
+ // TODO: Add a value_msat field to PaymentPath and use it instead of this function.
+ fn get_value_msat(&self) -> u64 {
+ self.hops.last().unwrap().route_hop.fee_msat
+ }
+
+ fn get_total_fee_paid_msat(&self) -> u64 {
+ if self.hops.len() < 1 {
+ return 0;
+ }
+ let mut result = 0;
+ // Can't use next_hops_fee_msat because it gets outdated.
+ for (i, hop) in self.hops.iter().enumerate() {
+ if i != self.hops.len() - 1 {
+ result += hop.route_hop.fee_msat;
+ }
+ }
+ return result;
+ }
+
+ // If the amount transferred by the path is updated, the fees should be adjusted. Any other way
+ // to change fees may result in an inconsistency.
+ //
+		// Sometimes we call this function right after constructing a path which has inconsistent fees
+ // (in terms of reaching htlc_minimum_msat), so that this function puts the fees in order.
+ // In that case we call it on the "same" amount we initially allocated for this path, and which
+ // could have been reduced on the way. In that case, there is also a risk of exceeding
+ // available_liquidity inside this function, because the function is unaware of this bound.
+ // In our specific recomputation cases where we never increase the value the risk is pretty low.
+ // This function, however, does not support arbitrarily increasing the value being transferred,
+		// and the assertion below will be triggered.
+ fn update_value_and_recompute_fees(&mut self, value_msat: u64) {
+ assert!(value_msat <= self.hops.last().unwrap().route_hop.fee_msat);
+
+ let mut total_fee_paid_msat = 0 as u64;
+ for i in (0..self.hops.len()).rev() {
+ let last_hop = i == self.hops.len() - 1;
+
+ // For non-last-hop, this value will represent the fees paid on the current hop. It
+ // will consist of the fees for the use of the next hop, and extra fees to match
+ // htlc_minimum_msat of the current channel. Last hop is handled separately.
+ let mut cur_hop_fees_msat = 0;
+ if !last_hop {
+ cur_hop_fees_msat = self.hops.get(i + 1).unwrap().hop_use_fee_msat;
+ }
+
+ let mut cur_hop = self.hops.get_mut(i).unwrap();
+ cur_hop.next_hops_fee_msat = total_fee_paid_msat;
+ // Overpay in fees if we can't save these funds due to htlc_minimum_msat.
+ // We try to account for htlc_minimum_msat in scoring (add_entry!), so that nodes don't
+ // set it too high just to maliciously take more fees by exploiting this
+ // match htlc_minimum_msat logic.
+ let mut cur_hop_transferred_amount_msat = total_fee_paid_msat + value_msat;
+ if let Some(extra_fees_msat) = cur_hop.htlc_minimum_msat.checked_sub(cur_hop_transferred_amount_msat) {
+ // Note that there is a risk that *previous hops* (those closer to us, as we go
+ // payee->our_node here) would exceed their htlc_maximum_msat or available balance.
+ //
+ // This might make us end up with a broken route, although this should be super-rare
+ // in practice, both because of how healthy channels look like, and how we pick
+ // channels in add_entry.
+ // Also, this can't be exploited more heavily than *announce a free path and fail
+ // all payments*.
+ cur_hop_transferred_amount_msat += extra_fees_msat;
+ total_fee_paid_msat += extra_fees_msat;
+ cur_hop_fees_msat += extra_fees_msat;
+ }
+
+ if last_hop {
+ // Final hop is a special case: it usually has just value_msat (by design), but also
+ // it still could overpay for the htlc_minimum_msat.
+ cur_hop.route_hop.fee_msat = cur_hop_transferred_amount_msat;
+ } else {
+ // Propagate updated fees for the use of the channels to one hop back, where they
+ // will be actually paid (fee_msat). The last hop is handled above separately.
+ cur_hop.route_hop.fee_msat = cur_hop_fees_msat;
+ }
+
+ // Fee for the use of the current hop which will be deducted on the previous hop.
+ // Irrelevant for the first hop, as it doesn't have the previous hop, and the use of
+ // this channel is free for us.
+ if i != 0 {
+ if let Some(new_fee) = compute_fees(cur_hop_transferred_amount_msat, cur_hop.channel_fees) {
+ cur_hop.hop_use_fee_msat = new_fee;
+ total_fee_paid_msat += new_fee;
+ } else {
+ // It should not be possible because this function is called only to reduce the
+ // value. In that case, compute_fee was already called with the same fees for
+ // larger amount and there was no overflow.
+ unreachable!();
+ }
+ }
+ }
+ }
+ }
+
+ fn compute_fees(amount_msat: u64, channel_fees: RoutingFees) -> Option<u64> {
+ let proportional_fee_millions =
+ amount_msat.checked_mul(channel_fees.proportional_millionths as u64);
+ if let Some(new_fee) = proportional_fee_millions.and_then(|part| {
+ (channel_fees.base_msat as u64).checked_add(part / 1_000_000) }) {
+
+ Some(new_fee)
+ } else {
+ // This function may be (indirectly) called without any verification,
+ // with channel_fees provided by a caller. We should handle it gracefully.
+ None
+ }
+ }
- /// Gets a route from us to the given target node.
+ /// Gets a route from us (payer) to the given target node (payee).
///
/// Extra routing hops between known nodes and the target will be used if they are included in
/// last_hops.
///
/// If some channels aren't announced, it may be useful to fill in a first_hops with the
/// results from a local ChannelManager::list_usable_channels() call. If it is filled in, our
- /// view of our local channels (from net_graph_msg_handler) will be ignored, and only those in first_hops
- /// will be used.
+ /// view of our local channels (from net_graph_msg_handler) will be ignored, and only those
+ /// in first_hops will be used.
///
/// Panics if first_hops contains channels without short_channel_ids
/// (ChannelManager::list_usable_channels will never include such channels).
///
/// The fees on channels from us to next-hops are ignored (as they are assumed to all be
- /// equal), however the enabled/disabled bit on such channels as well as the htlc_minimum_msat
- /// *is* checked as they may change based on the receiving node.
- pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, target: &PublicKey, first_hops: Option<&[&ChannelDetails]>,
+ /// equal), however the enabled/disabled bit on such channels as well as the
+ /// htlc_minimum_msat/htlc_maximum_msat *are* checked as they may change based on the receiving node.
+ pub fn get_route<L: Deref>(our_node_id: &PublicKey, network: &NetworkGraph, payee: &PublicKey, first_hops: Option<&[&ChannelDetails]>,
last_hops: &[&RouteHint], final_value_msat: u64, final_cltv: u32, logger: L) -> Result<Route, LightningError> where L::Target: Logger {
// TODO: Obviously *only* using total fee cost sucks. We should consider weighting by
// uptime/success in using a node in the past.
- if *target == *our_node_id {
+ if *payee == *our_node_id {
return Err(LightningError{err: "Cannot generate a route to ourselves".to_owned(), action: ErrorAction::IgnoreError});
}
return Err(LightningError{err: "Cannot generate a route of more value than all existing satoshis".to_owned(), action: ErrorAction::IgnoreError});
}
- // We do a dest-to-source Dijkstra's sorting by each node's distance from the destination
+ if final_value_msat == 0 {
+ return Err(LightningError{err: "Cannot send a payment of 0 msat".to_owned(), action: ErrorAction::IgnoreError});
+ }
+
+ for last_hop in last_hops {
+ if last_hop.src_node_id == *payee {
+ return Err(LightningError{err: "Last hop cannot have a payee as a source.".to_owned(), action: ErrorAction::IgnoreError});
+ }
+ }
+
+ // The general routing idea is the following:
+ // 1. Fill first/last hops communicated by the caller.
+ // 2. Attempt to construct a path from payer to payee for transferring
+ // any ~sufficient (described later) value.
+ // If succeed, remember which channels were used and how much liquidity they have available,
+ // so that future paths don't rely on the same liquidity.
+	// 3. Proceed to the next step if:
+ // - we hit the recommended target value;
+ // - OR if we could not construct a new path. Any next attempt will fail too.
+ // Otherwise, repeat step 2.
+ // 4. See if we managed to collect paths which aggregately are able to transfer target value
+ // (not recommended value). If yes, proceed. If not, fail routing.
+ // 5. Randomly combine paths into routes having enough to fulfill the payment. (TODO: knapsack)
+ // 6. Of all the found paths, select only those with the lowest total fee.
+ // 7. The last path in every selected route is likely to be more than we need.
+ // Reduce its value-to-transfer and recompute fees.
+ // 8. Choose the best route by the lowest total fee.
+
+ // As for the actual search algorithm,
+ // we do a payee-to-payer Dijkstra's sorting by each node's distance from the payee
// plus the minimum per-HTLC fee to get from it to another node (aka "shitty A*").
// TODO: There are a few tweaks we could do, including possibly pre-calculating more stuff
// to use as the A* heuristic beyond just the cost to get one node further than the current
let dummy_directional_info = DummyDirectionalChannelInfo { // used for first_hops routes
cltv_expiry_delta: 0,
htlc_minimum_msat: 0,
+ htlc_maximum_msat: None,
fees: RoutingFees {
base_msat: 0,
proportional_millionths: 0,
let mut targets = BinaryHeap::new(); //TODO: Do we care about switching to eg Fibbonaci heap?
let mut dist = HashMap::with_capacity(network.get_nodes().len());
+ // When arranging a route, we select multiple paths so that we can make a multi-path payment.
+ // Don't stop searching for paths when we think they're
+ // sufficient to transfer a given value aggregately.
+ // Search for higher value, so that we collect many more paths,
+ // and then select the best combination among them.
+ const ROUTE_CAPACITY_PROVISION_FACTOR: u64 = 3;
+ let recommended_value_msat = final_value_msat * ROUTE_CAPACITY_PROVISION_FACTOR as u64;
+
+ // Step (1).
+ // Prepare the data we'll use for payee-to-payer search by
+ // inserting first hops suggested by the caller as targets.
+ // Our search will then attempt to reach them while traversing from the payee node.
let mut first_hop_targets = HashMap::with_capacity(if first_hops.is_some() { first_hops.as_ref().unwrap().len() } else { 0 });
if let Some(hops) = first_hops {
for chan in hops {
let short_channel_id = chan.short_channel_id.expect("first_hops should be filled in with usable channels, not pending ones");
- if chan.remote_network_id == *target {
- return Ok(Route {
- paths: vec![vec![RouteHop {
- pubkey: chan.remote_network_id,
- node_features: chan.counterparty_features.to_context(),
- short_channel_id,
- channel_features: chan.counterparty_features.to_context(),
- fee_msat: final_value_msat,
- cltv_expiry_delta: final_cltv,
- }]],
- });
+ if chan.remote_network_id == *our_node_id {
+ return Err(LightningError{err: "First hop cannot have our_node_id as a destination.".to_owned(), action: ErrorAction::IgnoreError});
}
- first_hop_targets.insert(chan.remote_network_id, (short_channel_id, chan.counterparty_features.clone()));
+ first_hop_targets.insert(chan.remote_network_id, (short_channel_id, chan.counterparty_features.clone(), chan.outbound_capacity_msat));
}
if first_hop_targets.is_empty() {
return Err(LightningError{err: "Cannot route when there are no outbound routes away from us".to_owned(), action: ErrorAction::IgnoreError});
}
}
+ // We don't want multiple paths (as per MPP) share liquidity of the same channels.
+ // This map allows paths to be aware of the channel use by other paths in the same call.
+	// This would help to make better path finding decisions and not "overbook" channels.
+ // It is unaware of the directions (except for `outbound_capacity_msat` in `first_hops`).
+ let mut bookkeeped_channels_liquidity_available_msat = HashMap::new();
+
+ // Keeping track of how much value we already collected across other paths. Helps to decide:
+ // - how much a new path should be transferring (upper bound);
+ // - whether a channel should be disregarded because
+	//   its available liquidity is too small compared to how much more we need to collect;
+ // - when we want to stop looking for new paths.
+ let mut already_collected_value_msat = 0;
+
macro_rules! add_entry {
// Adds entry which goes from $src_node_id to $dest_node_id
// over the channel with id $chan_id with fees described in
// $directional_info.
- ( $chan_id: expr, $src_node_id: expr, $dest_node_id: expr, $directional_info: expr, $chan_features: expr, $starting_fee_msat: expr ) => {
- //TODO: Explore simply adding fee to hit htlc_minimum_msat
- if $starting_fee_msat as u64 + final_value_msat >= $directional_info.htlc_minimum_msat {
- let proportional_fee_millions = ($starting_fee_msat + final_value_msat).checked_mul($directional_info.fees.proportional_millionths as u64);
- if let Some(new_fee) = proportional_fee_millions.and_then(|part| {
- ($directional_info.fees.base_msat as u64).checked_add(part / 1000000) })
- {
- let mut total_fee = $starting_fee_msat as u64;
- let hm_entry = dist.entry(&$src_node_id);
- let old_entry = hm_entry.or_insert_with(|| {
- let mut fee_base_msat = u32::max_value();
- let mut fee_proportional_millionths = u32::max_value();
- if let Some(fees) = network.get_nodes().get(&$src_node_id).and_then(|node| node.lowest_inbound_channel_fees) {
- fee_base_msat = fees.base_msat;
- fee_proportional_millionths = fees.proportional_millionths;
- }
- (u64::max_value(),
- fee_base_msat,
- fee_proportional_millionths,
- RouteHop {
- pubkey: $dest_node_id.clone(),
- node_features: NodeFeatures::empty(),
- short_channel_id: 0,
- channel_features: $chan_features.clone(),
- fee_msat: 0,
- cltv_expiry_delta: 0,
- },
- )
- });
- if $src_node_id != *our_node_id {
- // Ignore new_fee for channel-from-us as we assume all channels-from-us
- // will have the same effective-fee
- total_fee += new_fee;
- if let Some(fee_inc) = final_value_msat.checked_add(total_fee).and_then(|inc| { (old_entry.2 as u64).checked_mul(inc) }) {
- total_fee += fee_inc / 1000000 + (old_entry.1 as u64);
+ // $next_hops_fee_msat represents the fees paid for using all the channel *after* this one,
+ // since that value has to be transferred over this channel.
+ ( $chan_id: expr, $src_node_id: expr, $dest_node_id: expr, $directional_info: expr, $capacity_sats: expr, $chan_features: expr, $next_hops_fee_msat: expr,
+ $next_hops_value_contribution: expr ) => {
+ // Channels to self should not be used. This is more of belt-and-suspenders, because in
+ // practice these cases should be caught earlier:
+ // - for regular channels at channel announcement (TODO)
+ // - for first and last hops early in get_route
+ if $src_node_id != $dest_node_id.clone() {
+ let available_liquidity_msat = bookkeeped_channels_liquidity_available_msat.entry($chan_id.clone()).or_insert_with(|| {
+ let mut initial_liquidity_available_msat = None;
+ if let Some(capacity_sats) = $capacity_sats {
+ initial_liquidity_available_msat = Some(capacity_sats * 1000);
+ }
+
+ if let Some(htlc_maximum_msat) = $directional_info.htlc_maximum_msat {
+ if let Some(available_msat) = initial_liquidity_available_msat {
+ initial_liquidity_available_msat = Some(cmp::min(available_msat, htlc_maximum_msat));
} else {
- // max_value means we'll always fail the old_entry.0 > total_fee check
- total_fee = u64::max_value();
+ initial_liquidity_available_msat = Some(htlc_maximum_msat);
}
}
- let new_graph_node = RouteGraphNode {
- pubkey: $src_node_id,
- lowest_fee_to_peer_through_node: total_fee,
- lowest_fee_to_node: $starting_fee_msat as u64 + new_fee,
+
+ match initial_liquidity_available_msat {
+ Some(available_msat) => available_msat,
+ // We assume channels with unknown balance have
+ // a capacity of 0.0025 BTC (or 250_000 sats).
+ None => 250_000 * 1000
+ }
+ });
+
+				// It is tricky to subtract $next_hops_fee_msat from available liquidity here.
+ // It may be misleading because we might later choose to reduce the value transferred
+ // over these channels, and the channel which was insufficient might become sufficient.
+ // Worst case: we drop a good channel here because it can't cover the high following
+ // fees caused by one expensive channel, but then this channel could have been used
+ // if the amount being transferred over this path is lower.
+ // We do this for now, but this is a subject for removal.
+ if let Some(available_value_contribution_msat) = available_liquidity_msat.checked_sub($next_hops_fee_msat) {
+
+ // Routing Fragmentation Mitigation heuristic:
+ //
+ // Routing fragmentation across many payment paths increases the overall routing
+ // fees as you have irreducible routing fees per-link used (`fee_base_msat`).
+ // Taking too many smaller paths also increases the chance of payment failure.
+ // Thus to avoid this effect, we require from our collected links to provide
+ // at least a minimal contribution to the recommended value yet-to-be-fulfilled.
+ //
+ // This requirement is currently 5% of the remaining-to-be-collected value.
+ // This means as we successfully advance in our collection,
+ // the absolute liquidity contribution is lowered,
+ // thus increasing the number of potential channels to be selected.
+
+ // Derive the minimal liquidity contribution with a ratio of 20 (5%, rounded up).
+ let minimal_value_contribution_msat: u64 = (recommended_value_msat - already_collected_value_msat + 19) / 20;
+ // Verify the liquidity offered by this channel complies to the minimal contribution.
+ let contributes_sufficient_value = available_value_contribution_msat >= minimal_value_contribution_msat;
+
+ let value_contribution_msat = cmp::min(available_value_contribution_msat, $next_hops_value_contribution);
+ // Includes paying fees for the use of the following channels.
+ let amount_to_transfer_over_msat: u64 = match value_contribution_msat.checked_add($next_hops_fee_msat) {
+ Some(result) => result,
+ // Can't overflow due to how the values were computed right above.
+ None => unreachable!(),
};
- if old_entry.0 > total_fee {
- targets.push(new_graph_node);
- old_entry.0 = total_fee;
- old_entry.3 = RouteHop {
- pubkey: $dest_node_id.clone(),
- node_features: NodeFeatures::empty(),
- short_channel_id: $chan_id.clone(),
- channel_features: $chan_features.clone(),
- fee_msat: new_fee, // This field is ignored on the last-hop anyway
- cltv_expiry_delta: $directional_info.cltv_expiry_delta as u32,
+
+ // If HTLC minimum is larger than the amount we're going to transfer, we shouldn't
+ // bother considering this channel.
+ // Since we're choosing amount_to_transfer_over_msat as maximum possible, it can
+ // be only reduced later (not increased), so this channel should just be skipped
+ // as not sufficient.
+ // TODO: Explore simply adding fee to hit htlc_minimum_msat
+ if contributes_sufficient_value && amount_to_transfer_over_msat >= $directional_info.htlc_minimum_msat {
+ // Note that low contribution here (limited by available_liquidity_msat)
+ // might violate htlc_minimum_msat on the hops which are next along the
+					// payment path (upstream to the payee). To avoid that, we recompute the
+ // path fees knowing the final path contribution after constructing it.
+ let hm_entry = dist.entry(&$src_node_id);
+ let old_entry = hm_entry.or_insert_with(|| {
+ // If there was previously no known way to access
+ // the source node (recall it goes payee-to-payer) of $chan_id, first add
+ // a semi-dummy record just to compute the fees to reach the source node.
+ // This will affect our decision on selecting $chan_id
+ // as a way to reach the $dest_node_id.
+ let mut fee_base_msat = u32::max_value();
+ let mut fee_proportional_millionths = u32::max_value();
+ if let Some(Some(fees)) = network.get_nodes().get(&$src_node_id).map(|node| node.lowest_inbound_channel_fees) {
+ fee_base_msat = fees.base_msat;
+ fee_proportional_millionths = fees.proportional_millionths;
+ }
+ PathBuildingHop {
+ route_hop: RouteHop {
+ pubkey: $dest_node_id.clone(),
+ node_features: NodeFeatures::empty(),
+ short_channel_id: 0,
+ channel_features: $chan_features.clone(),
+ fee_msat: 0,
+ cltv_expiry_delta: 0,
+ },
+ src_lowest_inbound_fees: RoutingFees {
+ base_msat: fee_base_msat,
+ proportional_millionths: fee_proportional_millionths,
+ },
+ channel_fees: $directional_info.fees,
+ next_hops_fee_msat: u64::max_value(),
+ hop_use_fee_msat: u64::max_value(),
+ total_fee_msat: u64::max_value(),
+ htlc_minimum_msat: $directional_info.htlc_minimum_msat,
+ }
+ });
+
+ let mut hop_use_fee_msat = 0;
+ let mut total_fee_msat = $next_hops_fee_msat;
+
+ // Ignore hop_use_fee_msat for channel-from-us as we assume all channels-from-us
+ // will have the same effective-fee
+ if $src_node_id != *our_node_id {
+ match compute_fees(amount_to_transfer_over_msat, $directional_info.fees) {
+ // max_value means we'll always fail
+ // the old_entry.total_fee_msat > total_fee_msat check
+ None => total_fee_msat = u64::max_value(),
+ Some(fee_msat) => {
+ hop_use_fee_msat = fee_msat;
+ total_fee_msat += hop_use_fee_msat;
+ if let Some(prev_hop_fee_msat) = compute_fees(total_fee_msat + amount_to_transfer_over_msat,
+ old_entry.src_lowest_inbound_fees) {
+ if let Some(incremented_total_fee_msat) = total_fee_msat.checked_add(prev_hop_fee_msat) {
+ total_fee_msat = incremented_total_fee_msat;
+ }
+ else {
+ // max_value means we'll always fail
+ // the old_entry.total_fee_msat > total_fee_msat check
+ total_fee_msat = u64::max_value();
+ }
+ } else {
+ // max_value means we'll always fail
+ // the old_entry.total_fee_msat > total_fee_msat check
+ total_fee_msat = u64::max_value();
+ }
+ }
+ }
+ }
+
+ let new_graph_node = RouteGraphNode {
+ pubkey: $src_node_id,
+ lowest_fee_to_peer_through_node: total_fee_msat,
+ lowest_fee_to_node: $next_hops_fee_msat as u64 + hop_use_fee_msat,
+ value_contribution_msat: value_contribution_msat,
+ };
+
+ // Update the way of reaching $src_node_id with the given $chan_id (from $dest_node_id),
+ // if this way is cheaper than the already known
+ // (considering the cost to "reach" this channel from the route destination,
+ // the cost of using this channel,
+ // and the cost of routing to the source node of this channel).
+ // Also, consider the htlc_minimum_msat difference, because we might end up
+ // paying it. Consider the following exploit:
+ // we use 2 paths to transfer 1.5 BTC. One of them is 0-fee normal 1 BTC path,
+ // and for the other one we picked a 1sat-fee path with htlc_minimum_msat of
+ // 1 BTC. Now, since the latter is more expensive, we're going to try to cut it
+ // by 0.5 BTC, but then match htlc_minimum_msat by paying a fee of 0.5 BTC
+ // to this channel.
+ // TODO: this scoring could be smarter (e.g. 0.5*htlc_minimum_msat here).
+ let mut old_cost = old_entry.total_fee_msat;
+ if let Some(increased_old_cost) = old_cost.checked_add(old_entry.htlc_minimum_msat) {
+ old_cost = increased_old_cost;
+ } else {
+ old_cost = u64::max_value();
+ }
+
+ let mut new_cost = total_fee_msat;
+ if let Some(increased_new_cost) = new_cost.checked_add($directional_info.htlc_minimum_msat) {
+ new_cost = increased_new_cost;
+ } else {
+ new_cost = u64::max_value();
+ }
+
+ if new_cost < old_cost {
+ targets.push(new_graph_node);
+ old_entry.next_hops_fee_msat = $next_hops_fee_msat;
+ old_entry.hop_use_fee_msat = hop_use_fee_msat;
+ old_entry.total_fee_msat = total_fee_msat;
+ old_entry.route_hop = RouteHop {
+ pubkey: $dest_node_id.clone(),
+ node_features: NodeFeatures::empty(),
+ short_channel_id: $chan_id.clone(),
+ channel_features: $chan_features.clone(),
+ fee_msat: 0, // This value will be later filled with hop_use_fee_msat of the following channel
+ cltv_expiry_delta: $directional_info.cltv_expiry_delta as u32,
+ };
+ old_entry.channel_fees = $directional_info.fees;
+ // It's probably fine to replace the old entry, because the new one
+ // passed the htlc_minimum-related checks above.
+ old_entry.htlc_minimum_msat = $directional_info.htlc_minimum_msat;
}
}
}
};
}
+ // Find ways (channels with destination) to reach a given node and store them
+ // in the corresponding data structures (routing graph etc).
+ // $fee_to_target_msat represents how much it costs to reach to this node from the payee,
+ // meaning how much will be paid in fees after this node (to the best of our knowledge).
+ // This data can later be helpful to optimize routing (pay lower fees).
macro_rules! add_entries_to_cheapest_to_target_node {
- ( $node: expr, $node_id: expr, $fee_to_target_msat: expr ) => {
+ ( $node: expr, $node_id: expr, $fee_to_target_msat: expr, $next_hops_value_contribution: expr ) => {
if first_hops.is_some() {
- if let Some(&(ref first_hop, ref features)) = first_hop_targets.get(&$node_id) {
- add_entry!(first_hop, *our_node_id, $node_id, dummy_directional_info, features.to_context(), $fee_to_target_msat);
+ if let Some(&(ref first_hop, ref features, ref outbound_capacity_msat)) = first_hop_targets.get(&$node_id) {
+ add_entry!(first_hop, *our_node_id, $node_id, dummy_directional_info, Some(outbound_capacity_msat / 1000), features.to_context(), $fee_to_target_msat, $next_hops_value_contribution);
}
}
if first_hops.is_none() || chan.node_two != *our_node_id {
if let Some(two_to_one) = chan.two_to_one.as_ref() {
if two_to_one.enabled {
- add_entry!(chan_id, chan.node_two, chan.node_one, two_to_one, chan.features, $fee_to_target_msat);
+ add_entry!(chan_id, chan.node_two, chan.node_one, two_to_one, chan.capacity_sats, chan.features, $fee_to_target_msat, $next_hops_value_contribution);
}
}
}
if first_hops.is_none() || chan.node_one != *our_node_id {
if let Some(one_to_two) = chan.one_to_two.as_ref() {
if one_to_two.enabled {
- add_entry!(chan_id, chan.node_one, chan.node_two, one_to_two, chan.features, $fee_to_target_msat);
+ add_entry!(chan_id, chan.node_one, chan.node_two, one_to_two, chan.capacity_sats, chan.features, $fee_to_target_msat, $next_hops_value_contribution);
}
}
};
}
- match network.get_nodes().get(target) {
- None => {},
- Some(node) => {
- add_entries_to_cheapest_to_target_node!(node, target, 0);
- },
- }
+ let mut payment_paths = Vec::<PaymentPath>::new();
+
+ // TODO: diversify by nodes (so that all paths aren't doomed if one node is offline).
+ 'paths_collection: loop {
+ // For every new path, start from scratch, except
+ // bookkeeped_channels_liquidity_available_msat, which will improve
+ // the further iterations of path finding. Also don't erase first_hop_targets.
+ targets.clear();
+ dist.clear();
+
+ // If first hop is a private channel and the only way to reach the payee, this is the only
+ // place where it could be added.
+ if first_hops.is_some() {
+ if let Some(&(ref first_hop, ref features, ref outbound_capacity_msat)) = first_hop_targets.get(&payee) {
+ add_entry!(first_hop, *our_node_id, payee, dummy_directional_info, Some(outbound_capacity_msat / 1000), features.to_context(), 0, recommended_value_msat);
+ }
+ }
- for hop in last_hops.iter() {
- let have_hop_src_in_graph =
- if let Some(&(ref first_hop, ref features)) = first_hop_targets.get(&hop.src_node_id) {
- // If this hop connects to a node with which we have a direct channel, ignore the
- // network graph and add both the hop and our direct channel to the candidate set:
- //
- // Currently there are no channel-context features defined, so we are a
- // bit lazy here. In the future, we should pull them out via our
- // ChannelManager, but there's no reason to waste the space until we
- // need them.
- add_entry!(first_hop, *our_node_id , hop.src_node_id, dummy_directional_info, features.to_context(), 0);
- true
- } else {
- // In any other case, only add the hop if the source is in the regular network
- // graph:
- network.get_nodes().get(&hop.src_node_id).is_some()
- };
- if have_hop_src_in_graph {
- // BOLT 11 doesn't allow inclusion of features for the last hop hints, which
- // really sucks, cause we're gonna need that eventually.
- add_entry!(hop.short_channel_id, hop.src_node_id, target, hop, ChannelFeatures::empty(), 0);
+ // Add the payee as a target, so that the payee-to-payer
+ // search algorithm knows what to start with.
+ match network.get_nodes().get(payee) {
+ // The payee is not in our network graph, so nothing to add here.
+ // There is still a chance of reaching them via last_hops though,
+ // so don't yet fail the payment here.
+ // If not, targets.pop() will not even let us enter the loop in step 2.
+ None => {},
+ Some(node) => {
+ add_entries_to_cheapest_to_target_node!(node, payee, 0, recommended_value_msat);
+ },
}
- }
- while let Some(RouteGraphNode { pubkey, lowest_fee_to_node, .. }) = targets.pop() {
- if pubkey == *our_node_id {
- let mut res = vec!(dist.remove(&our_node_id).unwrap().3);
- loop {
- if let Some(&(_, ref features)) = first_hop_targets.get(&res.last().unwrap().pubkey) {
- res.last_mut().unwrap().node_features = features.to_context();
- } else if let Some(node) = network.get_nodes().get(&res.last().unwrap().pubkey) {
- if let Some(node_info) = node.announcement_info.as_ref() {
- res.last_mut().unwrap().node_features = node_info.features.clone();
+ // Step (1).
+ // If a caller provided us with last hops, add them to routing targets. Since this happens
+ // earlier than general path finding, they will be somewhat prioritized, although currently
+ // it matters only if the fees are exactly the same.
+ for hop in last_hops.iter() {
+ let have_hop_src_in_graph =
+ if let Some(&(ref first_hop, ref features, ref outbound_capacity_msat)) = first_hop_targets.get(&hop.src_node_id) {
+ // If this hop connects to a node with which we have a direct channel, ignore
+ // the network graph and add both the hop and our direct channel to
+ // the candidate set.
+ //
+ // Currently there are no channel-context features defined, so we are a
+ // bit lazy here. In the future, we should pull them out via our
+ // ChannelManager, but there's no reason to waste the space until we
+ // need them.
+ add_entry!(first_hop, *our_node_id , hop.src_node_id, dummy_directional_info, Some(outbound_capacity_msat / 1000), features.to_context(), 0, recommended_value_msat);
+ true
+ } else {
+ // In any other case, only add the hop if the source is in the regular network
+ // graph:
+ network.get_nodes().get(&hop.src_node_id).is_some()
+ };
+ if have_hop_src_in_graph {
+ // BOLT 11 doesn't allow inclusion of features for the last hop hints, which
+ // really sucks, cause we're gonna need that eventually.
+ let last_hop_htlc_minimum_msat: u64 = match hop.htlc_minimum_msat {
+ Some(htlc_minimum_msat) => htlc_minimum_msat,
+ None => 0
+ };
+ let directional_info = DummyDirectionalChannelInfo {
+ cltv_expiry_delta: hop.cltv_expiry_delta as u32,
+ htlc_minimum_msat: last_hop_htlc_minimum_msat,
+ htlc_maximum_msat: hop.htlc_maximum_msat,
+ fees: hop.fees,
+ };
+ add_entry!(hop.short_channel_id, hop.src_node_id, payee, directional_info, None::<u64>, ChannelFeatures::empty(), 0, recommended_value_msat);
+ }
+ }
+
+ // At this point, targets are filled with the data from first and
+ // last hops communicated by the caller, and the payment receiver.
+ let mut found_new_path = false;
+
+ // Step (2).
+ // If this loop terminates due to the exhaustion of targets, two situations are possible:
+ // - not enough outgoing liquidity:
+ // 0 < already_collected_value_msat < final_value_msat
+ // - enough outgoing liquidity:
+ // final_value_msat <= already_collected_value_msat < recommended_value_msat
+ // Both these cases (and other cases except reaching recommended_value_msat) mean that
+ // paths_collection will be stopped because found_new_path==false.
+ // This is not necessarily a routing failure.
+ 'path_construction: while let Some(RouteGraphNode { pubkey, lowest_fee_to_node, value_contribution_msat, .. }) = targets.pop() {
+
+ // Since we're going payee-to-payer, hitting our node as a target means we should stop
+ // traversing the graph and arrange the path out of what we found.
+ if pubkey == *our_node_id {
+ let mut new_entry = dist.remove(&our_node_id).unwrap();
+ let mut ordered_hops = vec!(new_entry.clone());
+
+ 'path_walk: loop {
+ if let Some(&(_, ref features, _)) = first_hop_targets.get(&ordered_hops.last().unwrap().route_hop.pubkey) {
+ ordered_hops.last_mut().unwrap().route_hop.node_features = features.to_context();
+ } else if let Some(node) = network.get_nodes().get(&ordered_hops.last().unwrap().route_hop.pubkey) {
+ if let Some(node_info) = node.announcement_info.as_ref() {
+ ordered_hops.last_mut().unwrap().route_hop.node_features = node_info.features.clone();
+ } else {
+ ordered_hops.last_mut().unwrap().route_hop.node_features = NodeFeatures::empty();
+ }
} else {
- res.last_mut().unwrap().node_features = NodeFeatures::empty();
+ // We should be able to fill in features for everything except the last
+ // hop, if the last hop was provided via a BOLT 11 invoice (though we
+ // should be able to extend it further as BOLT 11 does have feature
+ // flags for the last hop node itself).
+ assert!(ordered_hops.last().unwrap().route_hop.pubkey == *payee);
}
- } else {
- // We should be able to fill in features for everything except the last
- // hop, if the last hop was provided via a BOLT 11 invoice (though we
- // should be able to extend it further as BOLT 11 does have feature
- // flags for the last hop node itself).
- assert!(res.last().unwrap().pubkey == *target);
+
+ // Means we successfully traversed from the payer to the payee, now
+ // save this path for the payment route. Also, update the liquidity
+ // remaining on the used hops, so that we take them into account
+ // while looking for more paths.
+ if ordered_hops.last().unwrap().route_hop.pubkey == *payee {
+ break 'path_walk;
+ }
+
+ new_entry = match dist.remove(&ordered_hops.last().unwrap().route_hop.pubkey) {
+ Some(payment_hop) => payment_hop,
+ // We can't arrive at None because, if we ever add an entry to targets,
+ // we also fill in the entry in dist (see add_entry!).
+ None => unreachable!(),
+ };
+ // We "propagate" the fees one hop backward (topologically) here,
+ // so that fees paid for a HTLC forwarding on the current channel are
+ // associated with the previous channel (where they will be subtracted).
+ ordered_hops.last_mut().unwrap().route_hop.fee_msat = new_entry.hop_use_fee_msat;
+ ordered_hops.last_mut().unwrap().route_hop.cltv_expiry_delta = new_entry.route_hop.cltv_expiry_delta;
+ ordered_hops.push(new_entry.clone());
}
- if res.last().unwrap().pubkey == *target {
- break;
+ ordered_hops.last_mut().unwrap().route_hop.fee_msat = value_contribution_msat;
+ ordered_hops.last_mut().unwrap().hop_use_fee_msat = 0;
+ ordered_hops.last_mut().unwrap().route_hop.cltv_expiry_delta = final_cltv;
+
+ let mut payment_path = PaymentPath {hops: ordered_hops};
+
+ // We could have possibly constructed a slightly inconsistent path: since we reduce
+ // value being transferred along the way, we could have violated htlc_minimum_msat
+ // on some channels we already passed (assuming dest->source direction). Here, we
+ // recompute the fees again, so that if that's the case, we match the currently
+ // underpaid htlc_minimum_msat with fees.
+ payment_path.update_value_and_recompute_fees(value_contribution_msat);
+
+ // Since a path allows to transfer as much value as
+ // the smallest channel it has ("bottleneck"), we should recompute
+ // the fees so the sender's HTLCs don't overpay fees when traversing
+ // larger channels than the bottleneck. This may happen because
+ // when we were selecting those channels we were not aware how much value
+ // this path will transfer, and the relative fee for them
+ // might have been computed considering a larger value.
+ // Remember that we used these channels so that we don't rely
+ // on the same liquidity in future paths.
+ for payment_hop in payment_path.hops.iter() {
+ let channel_liquidity_available_msat = bookkeeped_channels_liquidity_available_msat.get_mut(&payment_hop.route_hop.short_channel_id).unwrap();
+ let mut spent_on_hop_msat = value_contribution_msat;
+ let next_hops_fee_msat = payment_hop.next_hops_fee_msat;
+ spent_on_hop_msat += next_hops_fee_msat;
+ if *channel_liquidity_available_msat < spent_on_hop_msat {
+ // This should not happen because we do recompute fees right before,
+ // trying to avoid cases when a hop is not usable due to the fee situation.
+ break 'path_construction;
+ }
+ *channel_liquidity_available_msat -= spent_on_hop_msat;
}
+ // Track the total amount all our collected paths allow to send so that we:
+ // - know when to stop looking for more paths
+ // - know which of the hops are useless considering how much more sats we need
+ // (contributes_sufficient_value)
+ already_collected_value_msat += value_contribution_msat;
+
+ payment_paths.push(payment_path);
+ found_new_path = true;
+ break 'path_construction;
+ }
- let new_entry = match dist.remove(&res.last().unwrap().pubkey) {
- Some(hop) => hop.3,
- None => return Err(LightningError{err: "Failed to find a non-fee-overflowing path to the given destination".to_owned(), action: ErrorAction::IgnoreError}),
- };
- res.last_mut().unwrap().fee_msat = new_entry.fee_msat;
- res.last_mut().unwrap().cltv_expiry_delta = new_entry.cltv_expiry_delta;
- res.push(new_entry);
+ // Otherwise, since the current target node is not us,
+ // keep "unrolling" the payment graph from payee to payer by
+ // finding a way to reach the current target from the payer side.
+ match network.get_nodes().get(&pubkey) {
+ None => {},
+ Some(node) => {
+ add_entries_to_cheapest_to_target_node!(node, &pubkey, lowest_fee_to_node, value_contribution_msat);
+ },
}
- res.last_mut().unwrap().fee_msat = final_value_msat;
- res.last_mut().unwrap().cltv_expiry_delta = final_cltv;
- let route = Route { paths: vec![res] };
- log_trace!(logger, "Got route: {}", log_route!(route));
- return Ok(route);
}
- match network.get_nodes().get(&pubkey) {
- None => {},
- Some(node) => {
- add_entries_to_cheapest_to_target_node!(node, &pubkey, lowest_fee_to_node);
- },
+ // Step (3).
+ // Stop either when recommended value is reached,
+ // or if during last iteration no new path was found.
+ // In the latter case, making another path finding attempt could not help,
+ // because we deterministically terminate the search due to low liquidity.
+ if already_collected_value_msat >= recommended_value_msat || !found_new_path {
+ break 'paths_collection;
+ }
+ }
+
+ // Step (4).
+ if payment_paths.len() == 0 {
+ return Err(LightningError{err: "Failed to find a path to the given destination".to_owned(), action: ErrorAction::IgnoreError});
+ }
+
+ if already_collected_value_msat < final_value_msat {
+ return Err(LightningError{err: "Failed to find a sufficient route to the given destination".to_owned(), action: ErrorAction::IgnoreError});
+ }
+
+ // Sort by total fees and take the best paths.
+ payment_paths.sort_by_key(|path| path.get_total_fee_paid_msat());
+ if payment_paths.len() > 50 {
+ payment_paths.truncate(50);
+ }
+
+ // Draw multiple sufficient routes by randomly combining the selected paths.
+ let mut drawn_routes = Vec::new();
+ for i in 0..payment_paths.len() {
+ let mut cur_route = Vec::<PaymentPath>::new();
+ let mut aggregate_route_value_msat = 0;
+
+ // Step (5).
+ // TODO: real random shuffle
+ // Currently just starts with i_th and goes up to i-1_th in a looped way.
+ let cur_payment_paths = [&payment_paths[i..], &payment_paths[..i]].concat();
+
+ // Step (6).
+ for payment_path in cur_payment_paths {
+ cur_route.push(payment_path.clone());
+ aggregate_route_value_msat += payment_path.get_value_msat();
+ if aggregate_route_value_msat > final_value_msat {
+ // Last path likely overpaid. Subtract it from the most expensive
+ // (in terms of proportional fee) path in this route and recompute fees.
+ // This might not be the most economically efficient way, but fewer paths
+ // also makes routing more reliable.
+ let mut overpaid_value_msat = aggregate_route_value_msat - final_value_msat;
+
+ // First, drop some expensive low-value paths entirely if possible.
+ // Sort by value so that we drop many really-low values first, since
+ // fewer paths is better: the payment is less likely to fail.
+ // TODO: this could also be optimized by also sorting by feerate_per_sat_routed,
+ // so that the sender pays less fees overall. And also htlc_minimum_msat.
+ cur_route.sort_by_key(|path| path.get_value_msat());
+ // We should make sure that at least 1 path is left.
+ let mut paths_left = cur_route.len();
+ cur_route.retain(|path| {
+ if paths_left == 1 {
+ return true
+ }
+ let mut keep = true;
+ let path_value_msat = path.get_value_msat();
+ if path_value_msat <= overpaid_value_msat {
+ keep = false;
+ overpaid_value_msat -= path_value_msat;
+ paths_left -= 1;
+ }
+ keep
+ });
+
+ if overpaid_value_msat == 0 {
+ break;
+ }
+
+ assert!(cur_route.len() > 0);
+
+ // Step (7).
+ // Now, subtract the overpaid value from the most-expensive path.
+ // TODO: this could also be optimized by also sorting by feerate_per_sat_routed,
+ // so that the sender pays less fees overall. And also htlc_minimum_msat.
+ cur_route.sort_by_key(|path| { path.hops.iter().map(|hop| hop.channel_fees.proportional_millionths as u64).sum::<u64>() });
+ let mut expensive_payment_path = cur_route.first_mut().unwrap();
+ // We already dropped all the small channels above, meaning all the
+ // remaining channels are larger than remaining overpaid_value_msat.
+ // Thus, this can't be negative.
+ let expensive_path_new_value_msat = expensive_payment_path.get_value_msat() - overpaid_value_msat;
+ expensive_payment_path.update_value_and_recompute_fees(expensive_path_new_value_msat);
+ break;
+ }
}
+ drawn_routes.push(cur_route);
+ }
+
+ // Step (8).
+ // Select the best route by lowest total fee.
+ drawn_routes.sort_by_key(|paths| paths.iter().map(|path| path.get_total_fee_paid_msat()).sum::<u64>());
+ let mut selected_paths = Vec::<Vec::<RouteHop>>::new();
+ for payment_path in drawn_routes.first().unwrap() {
+ selected_paths.push(payment_path.hops.iter().map(|payment_hop| payment_hop.route_hop.clone()).collect());
}
- Err(LightningError{err: "Failed to find a path to the given destination".to_owned(), action: ErrorAction::IgnoreError})
+ let route = Route { paths: selected_paths };
+ log_trace!(logger, "Got route: {}", log_route!(route));
+ return Ok(route);
}
#[cfg(test)]
use bitcoin::hashes::Hash;
use bitcoin::network::constants::Network;
use bitcoin::blockdata::constants::genesis_block;
+ use bitcoin::blockdata::script::Builder;
+ use bitcoin::blockdata::opcodes;
+ use bitcoin::blockdata::transaction::TxOut;
use hex;
match net_graph_msg_handler.handle_channel_update(&valid_channel_update) {
Ok(res) => assert!(res),
- // Err(_) => panic!()
- Err(e) => println!("{:?}", e.err)
+ Err(_) => panic!()
};
}
-
fn add_or_update_node(net_graph_msg_handler: &NetGraphMsgHandler<Arc<test_utils::TestChainSource>, Arc<test_utils::TestLogger>>, secp_ctx: &Secp256k1<All>, node_privkey: &SecretKey,
features: NodeFeatures, timestamp: u32) {
let node_id = PublicKey::from_secret_key(&secp_ctx, node_privkey);
}
}
- fn build_graph() -> (Secp256k1<All>, NetGraphMsgHandler<std::sync::Arc<crate::util::test_utils::TestChainSource>, std::sync::Arc<crate::util::test_utils::TestLogger>>, std::sync::Arc<test_utils::TestLogger>) {
+ fn build_graph() -> (Secp256k1<All>, NetGraphMsgHandler<std::sync::Arc<test_utils::TestChainSource>, std::sync::Arc<crate::util::test_utils::TestLogger>>, std::sync::Arc<test_utils::TestChainSource>, std::sync::Arc<test_utils::TestLogger>) {
let secp_ctx = Secp256k1::new();
let logger = Arc::new(test_utils::TestLogger::new());
+ let chain_monitor = Arc::new(test_utils::TestChainSource::new(Network::Testnet));
let net_graph_msg_handler = NetGraphMsgHandler::new(genesis_block(Network::Testnet).header.block_hash(), None, Arc::clone(&logger));
// Build network from our_id to node7:
//
add_or_update_node(&net_graph_msg_handler, &secp_ctx, &privkeys[5], NodeFeatures::from_le_bytes(id_to_feature_flags(6)), 0);
- (secp_ctx, net_graph_msg_handler, logger)
+ (secp_ctx, net_graph_msg_handler, chain_monitor, logger)
}
#[test]
fn simple_route_test() {
- let (secp_ctx, net_graph_msg_handler, logger) = build_graph();
+ let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
- // Simple route to 3 via 2
+ // Simple route to 2 via 1
+
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 0, 42, Arc::clone(&logger)) {
+ assert_eq!(err, "Cannot send a payment of 0 msat");
+ } else { panic!(); }
+
let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap();
assert_eq!(route.paths[0].len(), 2);
}
#[test]
- fn disable_channels_test() {
- let (secp_ctx, net_graph_msg_handler, logger) = build_graph();
+ fn invalid_first_hop_test() {
+ let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
+ let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
+
+ // Simple route to 2 via 1
+
+ let our_chans = vec![channelmanager::ChannelDetails {
+ channel_id: [0; 32],
+ short_channel_id: Some(2),
+ remote_network_id: our_id,
+ counterparty_features: InitFeatures::from_le_bytes(vec![0b11]),
+ channel_value_satoshis: 100000,
+ user_id: 0,
+ outbound_capacity_msat: 100000,
+ inbound_capacity_msat: 100000,
+ is_live: true,
+ }];
+
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::<Vec<_>>()), &Vec::new(), 100, 42, Arc::clone(&logger)) {
+ assert_eq!(err, "First hop cannot have our_node_id as a destination.");
+ } else { panic!(); }
+
+ let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap();
+ assert_eq!(route.paths[0].len(), 2);
+ }
+
+ #[test]
+ fn htlc_minimum_test() {
+ let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
- // // Disable channels 4 and 12 by flags=2
- update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[1], UnsignedChannelUpdate {
+ // Simple route to 2 via 1
+
+ // Disable other paths
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
chain_hash: genesis_block(Network::Testnet).header.block_hash(),
- short_channel_id: 4,
+ short_channel_id: 12,
timestamp: 2,
flags: 2, // to disable
cltv_expiry_delta: 0,
fee_proportional_millionths: 0,
excess_data: Vec::new()
});
- update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
chain_hash: genesis_block(Network::Testnet).header.block_hash(),
- short_channel_id: 12,
+ short_channel_id: 3,
+ timestamp: 2,
+ flags: 2, // to disable
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[7], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 13,
+ timestamp: 2,
+ flags: 2, // to disable
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 6,
+ timestamp: 2,
+ flags: 2, // to disable
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 7,
timestamp: 2,
flags: 2, // to disable
cltv_expiry_delta: 0,
excess_data: Vec::new()
});
- // If all the channels require some features we don't understand, route should fail
- if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 100, 42, Arc::clone(&logger)) {
+ // Check against amount_to_transfer_over_msat.
+ // Set minimal HTLC of 200_000_000 msat.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 2,
+ timestamp: 3,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 200_000_000,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ // Second hop only allows to forward 199_999_999 at most, thus not allowing the first hop to
+ // be used.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[1], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 4,
+ timestamp: 3,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(199_999_999),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ // Not possible to send 199_999_999, because the minimum on channel=2 is 200_000_000.
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 199_999_999, 42, Arc::clone(&logger)) {
assert_eq!(err, "Failed to find a path to the given destination");
} else { panic!(); }
- // If we specify a channel to node7, that overrides our local channel view and that gets used
- let our_chans = vec![channelmanager::ChannelDetails {
- channel_id: [0; 32],
- short_channel_id: Some(42),
- remote_network_id: nodes[7].clone(),
- counterparty_features: InitFeatures::from_le_bytes(vec![0b11]),
- channel_value_satoshis: 0,
- user_id: 0,
- outbound_capacity_msat: 0,
- inbound_capacity_msat: 0,
- is_live: true,
- }];
- let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::<Vec<_>>()), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap();
- assert_eq!(route.paths[0].len(), 2);
-
- assert_eq!(route.paths[0][0].pubkey, nodes[7]);
- assert_eq!(route.paths[0][0].short_channel_id, 42);
- assert_eq!(route.paths[0][0].fee_msat, 200);
- assert_eq!(route.paths[0][0].cltv_expiry_delta, (13 << 8) | 1);
- assert_eq!(route.paths[0][0].node_features.le_flags(), &vec![0b11]); // it should also override our view of their features
- assert_eq!(route.paths[0][0].channel_features.le_flags(), &Vec::<u8>::new()); // No feature flags will meet the relevant-to-channel conversion
+ // Lift the restriction on the first hop.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 2,
+ timestamp: 4,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
- assert_eq!(route.paths[0][1].pubkey, nodes[2]);
- assert_eq!(route.paths[0][1].short_channel_id, 13);
- assert_eq!(route.paths[0][1].fee_msat, 100);
- assert_eq!(route.paths[0][1].cltv_expiry_delta, 42);
- assert_eq!(route.paths[0][1].node_features.le_flags(), &id_to_feature_flags(3));
- assert_eq!(route.paths[0][1].channel_features.le_flags(), &id_to_feature_flags(13));
+ // A payment above the minimum should pass
+ let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 199_999_999, 42, Arc::clone(&logger)).unwrap();
+ assert_eq!(route.paths[0].len(), 2);
}
#[test]
- fn disable_node_test() {
- let (secp_ctx, net_graph_msg_handler, logger) = build_graph();
- let (_, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+ fn htlc_minimum_overpay_test() {
+ let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
+ let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+
+ // A route to node#2 via two paths.
+ // Each of the two paths allows transferring 35-40 sats.
+ // Thus, 60 sats cannot be sent without overpaying.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 2,
+ timestamp: 2,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 35_000,
+ htlc_maximum_msat: OptionalField::Present(40_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 12,
+ timestamp: 3,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 35_000,
+ htlc_maximum_msat: OptionalField::Present(40_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ // Make 0 fee.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[7], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 13,
+ timestamp: 2,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[1], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 4,
+ timestamp: 2,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ // Disable other paths
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 1,
+ timestamp: 3,
+ flags: 2, // to disable
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 60_000, 42, Arc::clone(&logger)).unwrap();
+ // Overpay fees to hit htlc_minimum_msat.
+ let overpaid_fees = route.paths[0][0].fee_msat + route.paths[1][0].fee_msat;
+ // TODO: this could be better balanced to overpay 10k and not 15k.
+ assert_eq!(overpaid_fees, 15_000);
+
+ // Now, test that if there are 2 paths, a "cheaper" by fee path wouldn't be prioritized
+ // while taking even more fee to match htlc_minimum_msat.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 12,
+ timestamp: 4,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 65_000,
+ htlc_maximum_msat: OptionalField::Present(80_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 2,
+ timestamp: 3,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[1], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 4,
+ timestamp: 4,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 100_000,
+ excess_data: Vec::new()
+ });
+
+ let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 60_000, 42, Arc::clone(&logger)).unwrap();
+ // Fine to overpay for htlc_minimum_msat if it allows us to save fee.
+ assert_eq!(route.paths.len(), 1);
+ assert_eq!(route.paths[0][0].short_channel_id, 12);
+ let fees = route.paths[0][0].fee_msat;
+ assert_eq!(fees, 5_000);
+
+ let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 50_000, 42, Arc::clone(&logger)).unwrap();
+ // Not fine to overpay for htlc_minimum_msat if it requires paying more than fee on
+ // the other channel.
+ assert_eq!(route.paths.len(), 1);
+ assert_eq!(route.paths[0][0].short_channel_id, 2);
+ let fees = route.paths[0][0].fee_msat;
+ assert_eq!(fees, 5_000);
+ }
+
+ #[test]
+ fn disable_channels_test() {
+ let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
+ let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+
+ // Disable channels 4 and 12 by flags=2
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[1], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 4,
+ timestamp: 2,
+ flags: 2, // to disable
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 12,
+ timestamp: 2,
+ flags: 2, // to disable
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ // If all the channels to node2 are disabled, the route should fail
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 100, 42, Arc::clone(&logger)) {
+ assert_eq!(err, "Failed to find a path to the given destination");
+ } else { panic!(); }
+
+ // If we specify a channel to node7, that overrides our local channel view and that gets used
+ let our_chans = vec![channelmanager::ChannelDetails {
+ channel_id: [0; 32],
+ short_channel_id: Some(42),
+ remote_network_id: nodes[7].clone(),
+ counterparty_features: InitFeatures::from_le_bytes(vec![0b11]),
+ channel_value_satoshis: 0,
+ user_id: 0,
+ outbound_capacity_msat: 250_000_000,
+ inbound_capacity_msat: 0,
+ is_live: true,
+ }];
+ let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::<Vec<_>>()), &Vec::new(), 100, 42, Arc::clone(&logger)).unwrap();
+ assert_eq!(route.paths[0].len(), 2);
+
+ assert_eq!(route.paths[0][0].pubkey, nodes[7]);
+ assert_eq!(route.paths[0][0].short_channel_id, 42);
+ assert_eq!(route.paths[0][0].fee_msat, 200);
+ assert_eq!(route.paths[0][0].cltv_expiry_delta, (13 << 8) | 1);
+ assert_eq!(route.paths[0][0].node_features.le_flags(), &vec![0b11]); // it should also override our view of their features
+ assert_eq!(route.paths[0][0].channel_features.le_flags(), &Vec::<u8>::new()); // No feature flags will meet the relevant-to-channel conversion
+
+ assert_eq!(route.paths[0][1].pubkey, nodes[2]);
+ assert_eq!(route.paths[0][1].short_channel_id, 13);
+ assert_eq!(route.paths[0][1].fee_msat, 100);
+ assert_eq!(route.paths[0][1].cltv_expiry_delta, 42);
+ assert_eq!(route.paths[0][1].node_features.le_flags(), &id_to_feature_flags(3));
+ assert_eq!(route.paths[0][1].channel_features.le_flags(), &id_to_feature_flags(13));
+ }
+
+ #[test]
+ fn disable_node_test() {
+ let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
+ let (_, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
// Disable nodes 1, 2, and 8 by requiring unknown feature bits
let mut unknown_features = NodeFeatures::known();
counterparty_features: InitFeatures::from_le_bytes(vec![0b11]),
channel_value_satoshis: 0,
user_id: 0,
- outbound_capacity_msat: 0,
+ outbound_capacity_msat: 250_000_000,
inbound_capacity_msat: 0,
is_live: true,
}];
#[test]
fn our_chans_test() {
- let (secp_ctx, net_graph_msg_handler, logger) = build_graph();
+ let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
// Route to 1 via 2 and 3 because our channel to 1 is disabled
counterparty_features: InitFeatures::from_le_bytes(vec![0b11]),
channel_value_satoshis: 0,
user_id: 0,
- outbound_capacity_msat: 0,
+ outbound_capacity_msat: 250_000_000,
inbound_capacity_msat: 0,
is_live: true,
}];
short_channel_id: 8,
fees: zero_fees,
cltv_expiry_delta: (8 << 8) | 1,
- htlc_minimum_msat: 0,
+ htlc_minimum_msat: None,
+ htlc_maximum_msat: None,
}, RouteHint {
src_node_id: nodes[4].clone(),
short_channel_id: 9,
proportional_millionths: 0,
},
cltv_expiry_delta: (9 << 8) | 1,
- htlc_minimum_msat: 0,
+ htlc_minimum_msat: None,
+ htlc_maximum_msat: None,
}, RouteHint {
src_node_id: nodes[5].clone(),
short_channel_id: 10,
fees: zero_fees,
cltv_expiry_delta: (10 << 8) | 1,
- htlc_minimum_msat: 0,
+ htlc_minimum_msat: None,
+ htlc_maximum_msat: None,
})
}
#[test]
fn last_hops_test() {
- let (secp_ctx, net_graph_msg_handler, logger) = build_graph();
+ let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
// Simple test across 2, 3, 5, and 4 via a last_hop channel
+
+ // First check that the last hop can't have its source as the payee.
+ let invalid_last_hop = RouteHint {
+ src_node_id: nodes[6],
+ short_channel_id: 8,
+ fees: RoutingFees {
+ base_msat: 1000,
+ proportional_millionths: 0,
+ },
+ cltv_expiry_delta: (8 << 8) | 1,
+ htlc_minimum_msat: None,
+ htlc_maximum_msat: None,
+ };
+
+ let mut invalid_last_hops = last_hops(&nodes);
+ invalid_last_hops.push(invalid_last_hop);
+ {
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &invalid_last_hops.iter().collect::<Vec<_>>(), 100, 42, Arc::clone(&logger)) {
+ assert_eq!(err, "Last hop cannot have a payee as a source.");
+ } else { panic!(); }
+ }
+
let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[6], None, &last_hops(&nodes).iter().collect::<Vec<_>>(), 100, 42, Arc::clone(&logger)).unwrap();
assert_eq!(route.paths[0].len(), 5);
#[test]
fn our_chans_last_hop_connect_test() {
- let (secp_ctx, net_graph_msg_handler, logger) = build_graph();
+ let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
let (_, our_id, _, nodes) = get_nodes(&secp_ctx);
// Simple test with outbound channel to 4 to test that last_hops and first_hops connect
counterparty_features: InitFeatures::from_le_bytes(vec![0b11]),
channel_value_satoshis: 0,
user_id: 0,
- outbound_capacity_msat: 0,
+ outbound_capacity_msat: 250_000_000,
inbound_capacity_msat: 0,
is_live: true,
}];
proportional_millionths: 0,
},
cltv_expiry_delta: (8 << 8) | 1,
- htlc_minimum_msat: 0,
+ htlc_minimum_msat: None,
+ htlc_maximum_msat: None,
}];
let our_chans = vec![channelmanager::ChannelDetails {
channel_id: [0; 32],
assert_eq!(route.paths[0][1].node_features.le_flags(), &[0; 0]); // We dont pass flags in from invoices yet
assert_eq!(route.paths[0][1].channel_features.le_flags(), &[0; 0]); // We can't learn any flags from invoices, sadly
}
+
+ #[test]
+ fn available_amount_while_routing_test() {
+ // Tests whether we choose the correct available channel amount while routing.
+
+ let (secp_ctx, mut net_graph_msg_handler, chain_monitor, logger) = build_graph();
+ let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+
+ // We will use a simple single-path route from
+ // our node to node2 via node0: channels {1, 3}.
+
+ // First disable all other paths.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 2,
+ timestamp: 2,
+ flags: 2,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(100_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 12,
+ timestamp: 2,
+ flags: 2,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(100_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ // Make the first channel (#1) very permissive,
+ // and we will be testing all limits on the second channel.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 1,
+ timestamp: 2,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(1_000_000_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ // First, let's see if routing works if we have absolutely no idea about the available amount.
+ // In this case, it should be set to 250_000 sats.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 3,
+ timestamp: 2,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ {
+ // Attempt to route more than available results in a failure.
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 250_000_001, 42, Arc::clone(&logger)) {
+ assert_eq!(err, "Failed to find a sufficient route to the given destination");
+ } else { panic!(); }
+ }
+
+ {
+ // Now, attempting to route the exact amount we have should be fine.
+ let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 250_000_000, 42, Arc::clone(&logger)).unwrap();
+ assert_eq!(route.paths.len(), 1);
+ let path = route.paths.last().unwrap();
+ assert_eq!(path.len(), 2);
+ assert_eq!(path.last().unwrap().pubkey, nodes[2]);
+ assert_eq!(path.last().unwrap().fee_msat, 250_000_000);
+ }
+
+ // Check that setting outbound_capacity_msat in first_hops limits the channels.
+ // Disable channel #1 and use another first hop.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 1,
+ timestamp: 3,
+ flags: 2,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(1_000_000_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ // Now, limit the first_hop by the outbound_capacity_msat of 200_000 sats.
+ let our_chans = vec![channelmanager::ChannelDetails {
+ channel_id: [0; 32],
+ short_channel_id: Some(42),
+ remote_network_id: nodes[0].clone(),
+ counterparty_features: InitFeatures::from_le_bytes(vec![0b11]),
+ channel_value_satoshis: 0,
+ user_id: 0,
+ outbound_capacity_msat: 200_000_000,
+ inbound_capacity_msat: 0,
+ is_live: true,
+ }];
+
+ {
+ // Attempt to route more than available results in a failure.
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::<Vec<_>>()), &Vec::new(), 200_000_001, 42, Arc::clone(&logger)) {
+ assert_eq!(err, "Failed to find a sufficient route to the given destination");
+ } else { panic!(); }
+ }
+
+ {
+ // Now, attempting to route the exact amount we have should be fine.
+ let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], Some(&our_chans.iter().collect::<Vec<_>>()), &Vec::new(), 200_000_000, 42, Arc::clone(&logger)).unwrap();
+ assert_eq!(route.paths.len(), 1);
+ let path = route.paths.last().unwrap();
+ assert_eq!(path.len(), 2);
+ assert_eq!(path.last().unwrap().pubkey, nodes[2]);
+ assert_eq!(path.last().unwrap().fee_msat, 200_000_000);
+ }
+
+ // Enable channel #1 back.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 1,
+ timestamp: 4,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(1_000_000_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+
+ // Now let's see if routing works if we know only htlc_maximum_msat.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 3,
+ timestamp: 3,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(15_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ {
+ // Attempt to route more than available results in a failure.
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 15_001, 42, Arc::clone(&logger)) {
+ assert_eq!(err, "Failed to find a sufficient route to the given destination");
+ } else { panic!(); }
+ }
+
+ {
+ // Now, attempting to route the exact amount we have should be fine.
+ let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 15_000, 42, Arc::clone(&logger)).unwrap();
+ assert_eq!(route.paths.len(), 1);
+ let path = route.paths.last().unwrap();
+ assert_eq!(path.len(), 2);
+ assert_eq!(path.last().unwrap().pubkey, nodes[2]);
+ assert_eq!(path.last().unwrap().fee_msat, 15_000);
+ }
+
+ // Now let's see if routing works if we know only capacity from the UTXO.
+
+ // We can't change UTXO capacity on the fly, so we'll disable
+ // the existing channel and add another one with the capacity we need.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 3,
+ timestamp: 4,
+ flags: 2,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ let good_script = Builder::new().push_opcode(opcodes::all::OP_PUSHNUM_2)
+ .push_slice(&PublicKey::from_secret_key(&secp_ctx, &privkeys[0]).serialize())
+ .push_slice(&PublicKey::from_secret_key(&secp_ctx, &privkeys[2]).serialize())
+ .push_opcode(opcodes::all::OP_PUSHNUM_2)
+ .push_opcode(opcodes::all::OP_CHECKMULTISIG).into_script().to_v0_p2wsh();
+
+ *chain_monitor.utxo_ret.lock().unwrap() = Ok(TxOut { value: 15, script_pubkey: good_script.clone() });
+ net_graph_msg_handler.add_chain_access(Some(chain_monitor));
+
+ add_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[0], &privkeys[2], ChannelFeatures::from_le_bytes(id_to_feature_flags(3)), 333);
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 333,
+ timestamp: 1,
+ flags: 0,
+ cltv_expiry_delta: (3 << 8) | 1,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 333,
+ timestamp: 1,
+ flags: 1,
+ cltv_expiry_delta: (3 << 8) | 2,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Absent,
+ fee_base_msat: 100,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ {
+ // Attempt to route more than available results in a failure.
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 15_001, 42, Arc::clone(&logger)) {
+ assert_eq!(err, "Failed to find a sufficient route to the given destination");
+ } else { panic!(); }
+ }
+
+ {
+ // Now, attempting to route the exact amount we have should be fine.
+ let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 15_000, 42, Arc::clone(&logger)).unwrap();
+ assert_eq!(route.paths.len(), 1);
+ let path = route.paths.last().unwrap();
+ assert_eq!(path.len(), 2);
+ assert_eq!(path.last().unwrap().pubkey, nodes[2]);
+ assert_eq!(path.last().unwrap().fee_msat, 15_000);
+ }
+
+ // Now let's see if routing chooses htlc_maximum_msat over UTXO capacity.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 333,
+ timestamp: 6,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(10_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ {
+ // Attempt to route more than available results in a failure.
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 10_001, 42, Arc::clone(&logger)) {
+ assert_eq!(err, "Failed to find a sufficient route to the given destination");
+ } else { panic!(); }
+ }
+
+ {
+ // Now, attempting to route the exact amount we have should be fine.
+ let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 10_000, 42, Arc::clone(&logger)).unwrap();
+ assert_eq!(route.paths.len(), 1);
+ let path = route.paths.last().unwrap();
+ assert_eq!(path.len(), 2);
+ assert_eq!(path.last().unwrap().pubkey, nodes[2]);
+ assert_eq!(path.last().unwrap().fee_msat, 10_000);
+ }
+ }
+
+ #[test]
+ fn available_liquidity_last_hop_test() {
+ // Check that available liquidity properly limits the path even when only
+ // one of the latter hops is limited.
+ let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
+ let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+
+ // Path via {node7, node2, node4} is channels {12, 13, 6, 11}.
+ // Channels {12, 13, 11} have a capacity of 100 sats each; channel {6} has only 50.
+ // The path's effective capacity is therefore 50 sats, limited by channel 6.
+
+ // Disable other potential paths.
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 2,
+ timestamp: 2,
+ flags: 2,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(100_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 7,
+ timestamp: 2,
+ flags: 2,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(100_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ // Limit capacities
+
+ update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 12,
+ timestamp: 2,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(100_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[7], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 13,
+ timestamp: 2,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(100_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 6,
+ timestamp: 2,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(50_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[4], UnsignedChannelUpdate {
+ chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+ short_channel_id: 11,
+ timestamp: 2,
+ flags: 0,
+ cltv_expiry_delta: 0,
+ htlc_minimum_msat: 0,
+ htlc_maximum_msat: OptionalField::Present(100_000),
+ fee_base_msat: 0,
+ fee_proportional_millionths: 0,
+ excess_data: Vec::new()
+ });
+ {
+ // Attempt to route more than available results in a failure.
+ if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3], None, &Vec::new(), 60_000, 42, Arc::clone(&logger)) {
+ assert_eq!(err, "Failed to find a sufficient route to the given destination");
+ } else { panic!(); }
+ }
+
+ {
+ // Now, attempt to route 49 sats (just a bit below the capacity).
+ let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3], None, &Vec::new(), 49_000, 42, Arc::clone(&logger)).unwrap();
+ assert_eq!(route.paths.len(), 1);
+ let mut total_amount_paid_msat = 0;
+ for path in &route.paths {
+ assert_eq!(path.len(), 4);
+ assert_eq!(path.last().unwrap().pubkey, nodes[3]);
+ total_amount_paid_msat += path.last().unwrap().fee_msat;
+ }
+ assert_eq!(total_amount_paid_msat, 49_000);
+ }
+
+ {
+ // Attempt to route an exact amount is also fine
+ let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3], None, &Vec::new(), 50_000, 42, Arc::clone(&logger)).unwrap();
+ assert_eq!(route.paths.len(), 1);
+ let mut total_amount_paid_msat = 0;
+ for path in &route.paths {
+ assert_eq!(path.len(), 4);
+ assert_eq!(path.last().unwrap().pubkey, nodes[3]);
+ total_amount_paid_msat += path.last().unwrap().fee_msat;
+ }
+ assert_eq!(total_amount_paid_msat, 50_000);
+ }
+ }
+
+	#[test]
+	fn ignore_fee_first_hop_test() {
+		// Verifies that fees advertised on our own outbound (first-hop) channel are
+		// ignored by the router: the sender never pays itself forwarding fees.
+		let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
+		let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+
+		// Path via node0 is channels {1, 3}. Limit them to 100 and 50 sats (total limit 50).
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 1,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			// Enormous base fee on our own channel — must have no effect on the route.
+			fee_base_msat: 1_000_000,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 3,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(50_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		{
+			// 50 sats must route in a single 2-hop path with no fee overhead,
+			// despite channel 1's huge advertised base fee.
+			let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 50_000, 42, Arc::clone(&logger)).unwrap();
+			assert_eq!(route.paths.len(), 1);
+			let mut total_amount_paid_msat = 0;
+			for path in &route.paths {
+				assert_eq!(path.len(), 2);
+				assert_eq!(path.last().unwrap().pubkey, nodes[2]);
+				total_amount_paid_msat += path.last().unwrap().fee_msat;
+			}
+			assert_eq!(total_amount_paid_msat, 50_000);
+		}
+	}
+
+	#[test]
+	fn simple_mpp_route_test() {
+		// Basic multi-path-payment test: three parallel routes to node2 whose
+		// combined capacity (290 sats) must be usable, while anything above fails.
+		let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
+		let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+
+		// We need a route consisting of 3 paths:
+		// From our node to node2 via node0, node7, node1 (three paths one hop each).
+		// To achieve this, the amount being transferred should be around
+		// the total capacity of these 3 paths.
+
+		// First, we set limits on these (previously unlimited) channels.
+		// Their aggregate capacity will be 50 + 60 + 180 = 290 sats.
+
+		// Path via node0 is channels {1, 3}. Limit them to 100 and 50 sats (total limit 50).
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 1,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 3,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(50_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Path via node7 is channels {12, 13}. Limit them to 60 and 60 sats
+		// (total limit 60).
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 12,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(60_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[7], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 13,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(60_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Path via node1 is channels {2, 4}. Limit them to 200 and 180 sats
+		// (total capacity 180 sats).
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 2,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(200_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[1], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 4,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(180_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		{
+			// Attempt to route more than available results in a failure.
+			if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 300_000, 42, Arc::clone(&logger)) {
+				assert_eq!(err, "Failed to find a sufficient route to the given destination");
+			} else { panic!(); }
+		}
+
+		{
+			// Now, attempt to route 250 sats (just a bit below the capacity).
+			// Our algorithm should provide us with these 3 paths.
+			let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 250_000, 42, Arc::clone(&logger)).unwrap();
+			assert_eq!(route.paths.len(), 3);
+			let mut total_amount_paid_msat = 0;
+			for path in &route.paths {
+				// Each path is exactly 2 hops: intermediate node + destination.
+				assert_eq!(path.len(), 2);
+				assert_eq!(path.last().unwrap().pubkey, nodes[2]);
+				total_amount_paid_msat += path.last().unwrap().fee_msat;
+			}
+			assert_eq!(total_amount_paid_msat, 250_000);
+		}
+
+		{
+			// Attempt to route an exact amount is also fine
+			let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 290_000, 42, Arc::clone(&logger)).unwrap();
+			assert_eq!(route.paths.len(), 3);
+			let mut total_amount_paid_msat = 0;
+			for path in &route.paths {
+				assert_eq!(path.len(), 2);
+				assert_eq!(path.last().unwrap().pubkey, nodes[2]);
+				total_amount_paid_msat += path.last().unwrap().fee_msat;
+			}
+			assert_eq!(total_amount_paid_msat, 290_000);
+		}
+	}
+
+	#[test]
+	fn long_mpp_route_test() {
+		// MPP over longer, partially-overlapping paths: shared channels (5, 12, 13)
+		// carry two of the three paths, so their 200-sat capacity must be split.
+		let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
+		let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+
+		// We need a route consisting of 3 paths:
+		// From our node to node3 via {node0, node2}, {node7, node2, node4} and {node7, node2}.
+		// Note that these paths overlap (channels 5, 12, 13).
+		// We will route 300 sats.
+		// Each path will have 100 sats capacity, those channels which
+		// are used twice will have 200 sats capacity.
+
+		// Disable other potential paths.
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 2,
+			timestamp: 2,
+			// flags: 2 marks the channel direction as disabled.
+			flags: 2,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 7,
+			timestamp: 2,
+			flags: 2,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Path via {node0, node2} is channels {1, 3, 5}.
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 1,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 3,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Capacity of 200 sats because this channel will be used by 3rd path as well.
+		add_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], &privkeys[3], ChannelFeatures::from_le_bytes(id_to_feature_flags(5)), 5);
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 5,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(200_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Path via {node7, node2, node4} is channels {12, 13, 6, 11}.
+		// Add 100 sats to the capacities of {12, 13}, because these channels
+		// are also used for 3rd path. 100 sats for the rest. Total capacity: 100 sats.
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 12,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(200_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[7], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 13,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(200_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 6,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[4], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 11,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Path via {node7, node2} is channels {12, 13, 5}.
+		// We already limited them to 200 sats (they are used twice for 100 sats).
+		// Nothing to do here.
+
+		{
+			// Attempt to route more than available results in a failure.
+			if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3], None, &Vec::new(), 350_000, 42, Arc::clone(&logger)) {
+				assert_eq!(err, "Failed to find a sufficient route to the given destination");
+			} else { panic!(); }
+		}
+
+		{
+			// Now, attempt to route 300 sats (exact amount we can route).
+			// Our algorithm should provide us with these 3 paths, 100 sats each.
+			let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3], None, &Vec::new(), 300_000, 42, Arc::clone(&logger)).unwrap();
+			assert_eq!(route.paths.len(), 3);
+
+			let mut total_amount_paid_msat = 0;
+			for path in &route.paths {
+				assert_eq!(path.last().unwrap().pubkey, nodes[3]);
+				total_amount_paid_msat += path.last().unwrap().fee_msat;
+			}
+			assert_eq!(total_amount_paid_msat, 300_000);
+		}
+
+	}
+
+	#[test]
+	fn mpp_cheaper_route_test() {
+		let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
+		let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+
+		// This test checks that if we have two cheaper paths and one more expensive path,
+		// so that liquidity-wise any 2 of 3 combination is sufficient,
+		// two cheaper paths will be taken.
+		// These paths have equal available liquidity.
+
+		// We need a combination of 3 paths:
+		// From our node to node3 via {node0, node2}, {node7, node2, node4} and {node7, node2}.
+		// Note that these paths overlap (channels 5, 12, 13).
+		// Each path will have 100 sats capacity, those channels which
+		// are used twice will have 200 sats capacity.
+
+		// Disable other potential paths.
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 2,
+			timestamp: 2,
+			flags: 2,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 7,
+			timestamp: 2,
+			flags: 2,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Path via {node0, node2} is channels {1, 3, 5}.
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 1,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 3,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Capacity of 200 sats because this channel will be used by 3rd path as well.
+		add_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], &privkeys[3], ChannelFeatures::from_le_bytes(id_to_feature_flags(5)), 5);
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 5,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(200_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Path via {node7, node2, node4} is channels {12, 13, 6, 11}.
+		// Add 100 sats to the capacities of {12, 13}, because these channels
+		// are also used for 3rd path. 100 sats for the rest. Total capacity: 100 sats.
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 12,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(200_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[7], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 13,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(200_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 6,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			// The non-zero fee here makes the {12, 13, 6, 11} path the expensive one,
+			// so the router should prefer the other two (fee-free) paths.
+			fee_base_msat: 1_000,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[4], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 11,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Path via {node7, node2} is channels {12, 13, 5}.
+		// We already limited them to 200 sats (they are used twice for 100 sats).
+		// Nothing to do here.
+
+		{
+			// Now, attempt to route 180 sats.
+			// Our algorithm should provide us with these 2 paths.
+			let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3], None, &Vec::new(), 180_000, 42, Arc::clone(&logger)).unwrap();
+			assert_eq!(route.paths.len(), 2);
+
+			let mut total_value_transferred_msat = 0;
+			let mut total_paid_msat = 0;
+			for path in &route.paths {
+				assert_eq!(path.last().unwrap().pubkey, nodes[3]);
+				total_value_transferred_msat += path.last().unwrap().fee_msat;
+				for hop in path {
+					total_paid_msat += hop.fee_msat;
+				}
+			}
+			// If we paid fee, this would be higher.
+			assert_eq!(total_value_transferred_msat, 180_000);
+			let total_fees_paid = total_paid_msat - total_value_transferred_msat;
+			assert_eq!(total_fees_paid, 0);
+		}
+	}
+
+	#[test]
+	fn fees_on_mpp_route_test() {
+		// This test makes sure that MPP algorithm properly takes into account
+		// fees charged on the channels, by making the fees impactful:
+		// if the fee is not properly accounted for, the behavior is different.
+		let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
+		let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+
+		// We need a route consisting of 2 paths:
+		// From our node to node3 via {node0, node2} and {node7, node2, node4}.
+		// We will route 200 sats, Each path will have 100 sats capacity.
+
+		// This test is not particularly stable: e.g.,
+		// there's a way to route via {node0, node2, node4}.
+		// It works while pathfinding is deterministic, but can be broken otherwise.
+		// It's fine to ignore this concern for now.
+
+		// Disable other potential paths.
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 2,
+			timestamp: 2,
+			flags: 2,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 7,
+			timestamp: 2,
+			flags: 2,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Path via {node0, node2} is channels {1, 3, 5}.
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 1,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 3,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		add_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], &privkeys[3], ChannelFeatures::from_le_bytes(id_to_feature_flags(5)), 5);
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 5,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Path via {node7, node2, node4} is channels {12, 13, 6, 11}.
+		// All channels should be 100 sats capacity. But for the fee experiment,
+		// we'll add absolute fee of 150 sats paid for the use channel 6 (paid to node2 on channel 13).
+		// Since channel 12 allows to deliver only 250 sats to channel 13, channel 13 can transfer only
+		// 100 sats (and pay 150 sats in fees for the use of channel 6),
+		// so no matter how large are other channels,
+		// the whole path will be limited by 100 sats with just these 2 conditions:
+		// - channel 12 capacity is 250 sats
+		// - fee for channel 6 is 150 sats
+		// Let's test this by enforcing these 2 conditions and removing other limits.
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 12,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(250_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[7], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 13,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			// Absent means no htlc_maximum_msat limit is advertised for this hop.
+			htlc_maximum_msat: OptionalField::Absent,
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[2], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 6,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Absent,
+			fee_base_msat: 150_000,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[4], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 11,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Absent,
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		{
+			// Attempt to route more than available results in a failure.
+			if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3], None, &Vec::new(), 210_000, 42, Arc::clone(&logger)) {
+				assert_eq!(err, "Failed to find a sufficient route to the given destination");
+			} else { panic!(); }
+		}
+
+		{
+			// Now, attempt to route 200 sats (exact amount we can route).
+			let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[3], None, &Vec::new(), 200_000, 42, Arc::clone(&logger)).unwrap();
+			assert_eq!(route.paths.len(), 2);
+
+			let mut total_amount_paid_msat = 0;
+			for path in &route.paths {
+				assert_eq!(path.last().unwrap().pubkey, nodes[3]);
+				total_amount_paid_msat += path.last().unwrap().fee_msat;
+			}
+			assert_eq!(total_amount_paid_msat, 200_000);
+		}
+
+	}
+
+	#[test]
+	fn drop_lowest_channel_mpp_route_test() {
+		// This test checks that low-capacity channel is dropped when after
+		// path finding we realize that we found more capacity than we need.
+		let (secp_ctx, net_graph_msg_handler, _, logger) = build_graph();
+		let (our_privkey, our_id, privkeys, nodes) = get_nodes(&secp_ctx);
+
+		// We need a route consisting of 3 paths:
+		// From our node to node2 via node0, node7, node1 (three paths one hop each).
+
+		// The first and the second paths should be sufficient, but the third should be
+		// cheaper, so that we select it but drop later.
+
+		// First, we set limits on these (previously unlimited) channels.
+		// Their aggregate capacity will be 50 + 60 + 20 = 130 sats.
+
+		// Path via node0 is channels {1, 3}. Limit them to 100 and 50 sats (total limit 50);
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 1,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(100_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[0], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 3,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(50_000),
+			// Non-zero fee makes this path more expensive than the path via node1.
+			fee_base_msat: 100,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Path via node7 is channels {12, 13}. Limit them to 60 and 60 sats (total limit 60);
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 12,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(60_000),
+			fee_base_msat: 100,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[7], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 13,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(60_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		// Path via node1 is channels {2, 4}. Limit them to 20 and 20 sats (total capacity 20 sats).
+		update_channel(&net_graph_msg_handler, &secp_ctx, &our_privkey, UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 2,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(20_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+		update_channel(&net_graph_msg_handler, &secp_ctx, &privkeys[1], UnsignedChannelUpdate {
+			chain_hash: genesis_block(Network::Testnet).header.block_hash(),
+			short_channel_id: 4,
+			timestamp: 2,
+			flags: 0,
+			cltv_expiry_delta: 0,
+			htlc_minimum_msat: 0,
+			htlc_maximum_msat: OptionalField::Present(20_000),
+			fee_base_msat: 0,
+			fee_proportional_millionths: 0,
+			excess_data: Vec::new()
+		});
+
+		{
+			// Attempt to route more than available results in a failure.
+			if let Err(LightningError{err, action: ErrorAction::IgnoreError}) = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 150_000, 42, Arc::clone(&logger)) {
+				assert_eq!(err, "Failed to find a sufficient route to the given destination");
+			} else { panic!(); }
+		}
+
+		{
+			// Now, attempt to route 125 sats (just a bit below the capacity of 3 channels).
+			// Our algorithm should provide us with these 3 paths.
+			let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 125_000, 42, Arc::clone(&logger)).unwrap();
+			assert_eq!(route.paths.len(), 3);
+			let mut total_amount_paid_msat = 0;
+			for path in &route.paths {
+				assert_eq!(path.len(), 2);
+				assert_eq!(path.last().unwrap().pubkey, nodes[2]);
+				total_amount_paid_msat += path.last().unwrap().fee_msat;
+			}
+			assert_eq!(total_amount_paid_msat, 125_000);
+		}
+
+		{
+			// Attempt to route without the last small cheap channel
+			// (90 sats fits in the 50 + 60 sat paths, so the 20-sat path is dropped).
+			let route = get_route(&our_id, &net_graph_msg_handler.network_graph.read().unwrap(), &nodes[2], None, &Vec::new(), 90_000, 42, Arc::clone(&logger)).unwrap();
+			assert_eq!(route.paths.len(), 2);
+			let mut total_amount_paid_msat = 0;
+			for path in &route.paths {
+				assert_eq!(path.len(), 2);
+				assert_eq!(path.last().unwrap().pubkey, nodes[2]);
+				total_amount_paid_msat += path.last().unwrap().fee_msat;
+			}
+			assert_eq!(total_amount_paid_msat, 90_000);
+		}
+	}
+
}
+
+#[cfg(all(test, feature = "unstable"))]
+mod benches {
+	use super::*;
+	use util::logger::{Logger, Record};
+
+	use std::fs::File;
+	use test::Bencher;
+
+	/// No-op logger so the benchmark measures routing, not log formatting.
+	struct DummyLogger {}
+	impl Logger for DummyLogger {
+		fn log(&self, _record: &Record) {}
+	}
+
+	/// Benchmarks `get_route` over a real, serialized network-graph snapshot.
+	#[bench]
+	fn generate_routes(bench: &mut Bencher) {
+		// The snapshot date in the fetch URL must match the on-disk filename
+		// (2021-02-12); the message previously pointed at a 2020-02-12 URL.
+		let mut d = File::open("net_graph-2021-02-12.bin").expect("Please fetch https://bitcoin.ninja/ldk-net_graph-879e309c128-2021-02-12.bin and place it at lightning/net_graph-2021-02-12.bin");
+		let graph = NetworkGraph::read(&mut d).unwrap();
+
+		// First, get 100 (source, destination) pairs for which route-getting actually succeeds...
+		let mut path_endpoints = Vec::new();
+		let mut seed: usize = 0xdeadbeef;
+		'load_endpoints: for _ in 0..100 {
+			loop {
+				// Cheap deterministic PRNG: wrapping_mul avoids the overflow panic
+				// that `seed *= 0xdeadbeef` hits when overflow checks are enabled,
+				// while producing identical values in release builds.
+				seed = seed.wrapping_mul(0xdeadbeef);
+				let src = graph.get_nodes().keys().skip(seed % graph.get_nodes().len()).next().unwrap();
+				seed = seed.wrapping_mul(0xdeadbeef);
+				let dst = graph.get_nodes().keys().skip(seed % graph.get_nodes().len()).next().unwrap();
+				let amt = seed as u64 % 1_000_000;
+				if get_route(src, &graph, dst, None, &[], amt, 42, &DummyLogger{}).is_ok() {
+					path_endpoints.push((src, dst, amt));
+					continue 'load_endpoints;
+				}
+			}
+		}
+
+		// ...then benchmark finding paths between the nodes we learned.
+		let mut idx = 0;
+		bench.iter(|| {
+			let (src, dst, amt) = path_endpoints[idx % path_endpoints.len()];
+			assert!(get_route(src, &graph, dst, None, &[], amt, 42, &DummyLogger{}).is_ok());
+			idx += 1;
+		});
+	}
+}