/// See https://github.com/lightning/bolts/issues/905 for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
+ // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
+ pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
+
/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
}
/// Returns a minimum channel reserve value the remote needs to maintain,
- /// required by us.
+ /// required by us according to the configured or default
+ /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`].
///
/// Guaranteed to return a value no larger than channel_value_satoshis
///
- /// This is used both for new channels and to figure out what reserve value we sent to peers
- /// for channels serialized before we included our selected reserve value in the serialized
- /// data explicitly.
- pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
+ /// This is used both for outbound and inbound channels and has a lower bound
+ /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
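+ ///
+ /// For example (illustrative figures): with a 1_000_000 sat channel and
+ /// `their_channel_reserve_proportional_millionths` set to 20_000 (2%), this returns
+ /// 20_000 sats; with a 10_000 sat channel and the same configuration the calculated
+ /// 200 sats is raised to `MIN_THEIR_CHAN_RESERVE_SATOSHIS` (1_000 sats).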
+ pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
+ let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
+ cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
+ }
+
+ /// This is kept for legacy reasons, to allow forward compatibility.
+ /// LDK versions older than 0.0.104 don't know how to read/handle values other than the
+ /// default from storage. Hence, we use this function to avoid persisting the default value
+ /// of `holder_selected_channel_reserve_satoshis` for channels into storage.
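+ /// The legacy default is 1% of the channel value, floored at 1_000 sats and capped at the
+ /// channel value.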
+ pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
let (q, _) = channel_value_satoshis.overflowing_div(100);
- cmp::min(channel_value_satoshis, cmp::max(q, 1000)) //TODO
+ cmp::min(channel_value_satoshis, cmp::max(q, 1000))
}
pub(crate) fn opt_anchors(&self) -> bool {
if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
}
- let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis);
+ let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ // Protocol-level safety check in place; this should never trigger thanks to the
+ // `MIN_THEIR_CHAN_RESERVE_SATOSHIS` lower bound.
return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
}
- let feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
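+ // Note: `bounded_sat_per_1000_weight` is provided by the `LowerBoundedFeeEstimator` wrapper
+ // (see the tests below), which clamps the backing `FeeEstimator`'s estimate to a minimum
+ // relayable floor (assumed here to be 253 sat/kw) so that an underestimating fee estimator
+ // can't hand us an unrelayable feerate.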
let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
let commitment_tx_fee = Self::commit_tx_fee_msat(feerate, MIN_AFFORDABLE_HTLC_COUNT, opt_anchors);
// We generally don't care too much if they set the feerate to something very high, but it
// could result in the channel being useless due to everything being dust.
let upper_limit = cmp::max(250 * 25,
- fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
+ fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10);
if feerate_per_kw as u64 > upper_limit {
return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit)));
}
- let lower_limit = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
+ let lower_limit = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
// Some fee estimators round up to the next full sat/vbyte (ie 250 sats per kw), causing
// occasional issues with feerate disagreements between an initiator that wants a feerate
// of 1.1 sat/vbyte and a receiver that wants 1.1 rounded up to 2. Thus, we always add 250
}
}
- let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis);
+ let holder_selected_channel_reserve_satoshis = Channel::<Signer>::get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
+ // Protocol-level safety check in place; this should never trigger thanks to the
+ // `MIN_THEIR_CHAN_RESERVE_SATOSHIS` lower bound.
return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
}
if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
- return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). Channel value is ({} - {}).", holder_selected_channel_reserve_satoshis, full_channel_value_msat, msg.push_msat)));
+ return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
}
if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
// Propose a range from our current Background feerate to our Normal feerate plus our
// force_close_avoidance_max_fee_satoshis.
// If we fail to come to consensus, we'll have to force-close.
- let mut proposed_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Background);
- let normal_feerate = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Background);
+ let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
let mut proposed_max_feerate = if self.is_outbound() { normal_feerate } else { u32::max_value() };
// The spec requires that (when the channel does not have anchors) we only send absolute
/// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
/// Also returns the list of payment_hashes for channels which we can safely fail backwards
/// immediately (others we will have to allow to time out).
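+ /// Each returned tuple also includes this channel's counterparty node id and channel id so
+ /// the caller can report which channel the HTLC failed to be forwarded over.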
- pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>) {
+ pub fn force_shutdown(&mut self, should_broadcast: bool) -> (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>) {
// Note that we MUST only generate a monitor update that indicates force-closure - we're
// called during initialization prior to the chain_monitor in the encompassing ChannelManager
// being fully configured in some cases. Thus, it's likely any monitor events we generate will
// We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
// return them to fail the payment.
let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
+ let counterparty_node_id = self.get_counterparty_node_id();
for htlc_update in self.holding_cell_htlc_updates.drain(..) {
match htlc_update {
HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
- dropped_outbound_htlcs.push((source, payment_hash));
+ dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
},
_ => {}
}
// a different percentage of the channel value than 10%, which older versions of LDK used
// to set it to before the percentage was made configurable.
let serialized_holder_selected_reserve =
- if self.holder_selected_channel_reserve_satoshis != Self::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis)
+ if self.holder_selected_channel_reserve_satoshis != Self::get_legacy_default_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis)
{ Some(self.holder_selected_channel_reserve_satoshis) } else { None };
let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
let mut announcement_sigs = None;
let mut target_closing_feerate_sats_per_kw = None;
let mut monitor_pending_finalized_fulfills = Some(Vec::new());
- let mut holder_selected_channel_reserve_satoshis = Some(Self::get_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
+ let mut holder_selected_channel_reserve_satoshis = Some(Self::get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
let mut holder_max_htlc_value_in_flight_msat = Some(Self::get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
// Prior to supporting channel type negotiation, all of our channels were static_remotekey
// only, so we default to that if none was written.
#[cfg(test)]
mod tests {
+ use std::cmp;
use bitcoin::blockdata::script::{Script, Builder};
use bitcoin::blockdata::transaction::{Transaction, TxOut};
use bitcoin::blockdata::constants::genesis_block;
use ln::PaymentHash;
use ln::channelmanager::{HTLCSource, PaymentId};
use ln::channel::{Channel, InboundHTLCOutput, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator};
- use ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS};
+ use ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
use ln::features::{InitFeatures, ChannelTypeFeatures};
- use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate};
+ use ln::msgs::{ChannelUpdate, DataLossProtect, DecodeError, OptionalField, UnsignedChannelUpdate, MAX_VALUE_MSAT};
use ln::script::ShutdownScript;
use ln::chan_utils;
use ln::chan_utils::{htlc_success_tx_weight, htlc_timeout_tx_weight};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1::ffi::Signature as FFISignature;
use bitcoin::secp256k1::{SecretKey,PublicKey};
+ use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::secp256k1::ecdsa::RecoverableSignature;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::Hash;
type Signer = InMemorySigner;
fn get_node_secret(&self, _recipient: Recipient) -> Result<SecretKey, ()> { panic!(); }
+ fn ecdh(&self, _recipient: Recipient, _other_key: &PublicKey, _tweak: Option<&[u8; 32]>) -> Result<SharedSecret, ()> { panic!(); }
fn get_inbound_payment_key_material(&self) -> KeyMaterial { panic!(); }
fn get_destination_script(&self) -> Script {
let secp_ctx = Secp256k1::signing_only();
assert_eq!(chan_8.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
}
+ #[test]
+ fn test_configured_holder_selected_channel_reserve_satoshis() {
+ // Test that `new_outbound` and `new_from_req` create a channel with the correct
+ // channel reserves when `their_channel_reserve_proportional_millionths` is configured.
+ test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
+
+ // Test with valid but unreasonably high channel reserves
+ // The requesting and accepting parties request 49%/49% and 60%/30% channel reserves respectively
+ test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
+ test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
+
+ // Test with a calculated channel reserve below the lower bound,
+ // i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
+ test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
+
+ // Test with invalid channel reserves, where the sum of both is greater than or
+ // equal to the channel value
+ test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
+ test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
+ }
+
+ fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
+ let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
+ let logger = test_utils::TestLogger::new();
+ let secp_ctx = Secp256k1::new();
+ let seed = [42; 32];
+ let network = Network::Testnet;
+ let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
+ let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
+ let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
+
+ let mut outbound_node_config = UserConfig::default();
+ outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
+ let chan = Channel::<EnforcingSigner>::new_outbound(&&fee_est, &&keys_provider, outbound_node_id, &InitFeatures::known(), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42).unwrap();
+
+ let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
+ assert_eq!(chan.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
+
+ let chan_open_channel_msg = chan.get_open_channel(genesis_block(network).header.block_hash());
+ let mut inbound_node_config = UserConfig::default();
+ inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
+
+ if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
+ let chan_inbound_node = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42).unwrap();
+
+ let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
+
+ assert_eq!(chan_inbound_node.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
+ assert_eq!(chan_inbound_node.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
+ } else {
+ // Channel negotiation failed
+ let result = Channel::<EnforcingSigner>::new_from_req(&&fee_est, &&keys_provider, inbound_node_id, &InitFeatures::known(), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, 42);
+ assert!(result.is_err());
+ }
+ }
+
#[test]
fn channel_update() {
let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
flags: 0,
cltv_expiry_delta: 100,
htlc_minimum_msat: 5,
- htlc_maximum_msat: OptionalField::Absent,
+ htlc_maximum_msat: MAX_VALUE_MSAT,
fee_base_msat: 110,
fee_proportional_millionths: 11,
excess_data: Vec::new(),
use ln::msgs;
use ln::msgs::NetAddress;
use ln::onion_utils;
-use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT, OptionalField};
+use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
use ln::wire::Encode;
use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient};
use util::config::{UserConfig, ChannelConfig};
-use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use util::{byte_utils, events};
use util::scid_utils::fake_scid;
use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
DuplicateClaim,
}
-type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
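+// The `PublicKey` and `[u8; 32]` in each failed-HTLC tuple are the counterparty node id and
+// channel id of the channel the HTLC was meant to go out over, used when reporting the failure
+// via `HTLCDestination::NextHopChannel`.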
+type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
}
}
- /// Gets the current configuration applied to all new channels, as
+ /// Gets the current configuration applied to all new channels.
pub fn get_current_default_configuration(&self) -> &UserConfig {
&self.default_configuration
}
};
for htlc_source in failed_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
let _ = handle_error!(self, result, *counterparty_node_id);
let (monitor_update_option, mut failed_htlcs) = shutdown_res;
log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
for htlc_source in failed_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: channel_id };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
if let Some((funding_txo, monitor_update)) = monitor_update_option {
// There isn't anything we can do if we get an update failure - we're already
}
}
- let next_hop = match onion_utils::decode_next_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
+ let next_hop = match onion_utils::decode_next_payment_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
Ok(res) => res,
Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
return_malformed_err!(err_msg, err_code);
flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
cltv_expiry_delta: chan.get_cltv_expiry_delta(),
htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
- htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
+ htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
fee_proportional_millionths: chan.get_fee_proportional_millionths(),
excess_data: Vec::new(),
HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
routing, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
prev_funding_outpoint } => {
+ macro_rules! failure_handler {
+ ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
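+ // `$next_hop_unknown` selects the `HTLCDestination` reported in the resulting
+ // `HTLCHandlingFailed` event: `UnknownNextHop` when we couldn't forward over the
+ // requested channel, `FailedPayment` when the HTLC would have been claimed by us
+ // (e.g. a phantom-node receive that failed to parse).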
+ log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
+
+ let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
+ short_channel_id: prev_short_channel_id,
+ outpoint: prev_funding_outpoint,
+ htlc_id: prev_htlc_id,
+ incoming_packet_shared_secret: incoming_shared_secret,
+ phantom_shared_secret: $phantom_ss,
+ });
+
+ let reason = if $next_hop_unknown {
+ HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
+ } else {
+ HTLCDestination::FailedPayment{ payment_hash }
+ };
+
+ failed_forwards.push((htlc_source, payment_hash,
+ HTLCFailReason::Reason { failure_code: $err_code, data: $err_data },
+ reason
+ ));
+ continue;
+ }
+ }
macro_rules! fail_forward {
($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
{
- log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
- let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
- short_channel_id: prev_short_channel_id,
- outpoint: prev_funding_outpoint,
- htlc_id: prev_htlc_id,
- incoming_packet_shared_secret: incoming_shared_secret,
- phantom_shared_secret: $phantom_ss,
- });
- failed_forwards.push((htlc_source, payment_hash,
- HTLCFailReason::Reason { failure_code: $err_code, data: $err_data }
- ));
- continue;
+ failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
+ }
+ }
+ }
+ macro_rules! failed_payment {
+ ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
+ {
+ failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
}
}
}
let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) {
let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
- let next_hop = match onion_utils::decode_next_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
+ let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
Ok(res) => res,
Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
// `update_fail_malformed_htlc`, meaning here we encrypt the error as
// if it came from us (the second-to-last hop) but contains the sha256
// of the onion.
- fail_forward!(err_msg, err_code, sha256_of_onion.to_vec(), None);
+ failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
},
Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
- fail_forward!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
+ failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
},
};
match next_hop {
onion_utils::Hop::Receive(hop_data) => {
match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value, Some(phantom_shared_secret)) {
Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])),
- Err(ReceiveError { err_code, err_data, msg }) => fail_forward!(msg, err_code, err_data, Some(phantom_shared_secret))
+ Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
}
},
_ => panic!(),
}
let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
failed_forwards.push((htlc_source, payment_hash,
- HTLCFailReason::Reason { failure_code, data }
+ HTLCFailReason::Reason { failure_code, data },
+ HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
));
continue;
},
};
macro_rules! fail_htlc {
- ($htlc: expr) => {
+ ($htlc: expr, $payment_hash: expr) => {
let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec();
htlc_msat_height_data.extend_from_slice(
&byte_utils::be32_to_array(self.best_block.read().unwrap().height()),
incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
phantom_shared_secret,
}), payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }
+ HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
+ HTLCDestination::FailedPayment { payment_hash: $payment_hash },
));
}
}
if htlcs.len() == 1 {
if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
continue
}
}
if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
} else if total_value == $payment_data.total_msat {
htlcs.push(claimable_htlc);
new_events.push(events::Event::PaymentReceived {
let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
Ok(payment_preimage) => payment_preimage,
Err(()) => {
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
continue
}
};
},
hash_map::Entry::Occupied(_) => {
log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
}
}
}
hash_map::Entry::Occupied(inbound_payment) => {
if payment_data.is_none() {
log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
continue
};
let payment_data = payment_data.unwrap();
if inbound_payment.get().payment_secret != payment_data.payment_secret {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
} else if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() {
log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).",
log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
- fail_htlc!(claimable_htlc);
+ fail_htlc!(claimable_htlc, payment_hash);
} else {
let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
if payment_received_generated {
}
}
- for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason);
+ for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason, destination);
}
self.forward_htlcs(&mut phantom_receives);
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
let mut should_persist = NotifyOption::SkipPersist;
- let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
let mut handle_errors = Vec::new();
{
let mut should_persist = NotifyOption::SkipPersist;
if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
- let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
+ let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
let mut handle_errors = Vec::new();
let mut timed_out_mpp_htlcs = Vec::new();
}
for htlc_source in timed_out_mpp_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
+ let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver);
}
for (err, counterparty_node_id) in handle_errors.drain(..) {
self.best_block.read().unwrap().height()));
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
+ HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data },
+ HTLCDestination::FailedPayment { payment_hash: *payment_hash });
}
}
}
if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
if desired_err_code == 0x1000 | 20 {
- // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
+ // No flags for `disabled_flags` are currently defined so they're always two zero bytes.
+ // See https://github.com/lightning/bolts/blob/341ec84/04-onion-routing.md?plain=1#L1008
0u16.write(&mut enc).expect("Writes cannot fail");
}
(upd.serialized_length() as u16 + 2).write(&mut enc).expect("Writes cannot fail");
// be surfaced to the user.
fn fail_holding_cell_htlcs(
&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
- _counterparty_node_id: &PublicKey
+ counterparty_node_id: &PublicKey
) {
for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
match htlc_src {
hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
};
let channel_state = self.channel_state.lock().unwrap();
- self.fail_htlc_backwards_internal(channel_state,
- htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
+
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
+ self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver)
},
HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. } => {
let mut session_priv_bytes = [0; 32];
/// to fail and take the channel_state lock for each iteration (as we take ownership and may
/// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
/// still-available channels.
- fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
+ fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) {
//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
//identify whether we sent it or not based on the (I presume) very different runtime
//between the branches here. We should make this async and move it into the forward HTLCs
pending_events.push(path_failure);
if let Some(ev) = full_failure_ev { pending_events.push(ev); }
},
- HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, .. }) => {
+ HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, outpoint }) => {
let err_packet = match onion_error {
HTLCFailReason::Reason { failure_code, data } => {
log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
}
}
mem::drop(channel_state_lock);
+ let mut pending_events = self.pending_events.lock().unwrap();
if let Some(time) = forward_event {
- let mut pending_events = self.pending_events.lock().unwrap();
pending_events.push(events::Event::PendingHTLCsForwardable {
time_forwardable: time
});
}
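+ // Surface the failure to the user: `prev_channel_id` is the channel the HTLC arrived
+ // over, and `failed_next_destination` describes where it failed to go next.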
+ pending_events.push(events::Event::HTLCHandlingFailed {
+ prev_channel_id: outpoint.to_channel_id(),
+ failed_next_destination: destination
+ });
},
}
}
self.best_block.read().unwrap().height()));
self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
- HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
+ HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data },
+ HTLCDestination::FailedPayment { payment_hash });
} else {
match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let chan_restoration_res;
- let (mut pending_failures, finalized_claims) = {
+ let (mut pending_failures, finalized_claims, counterparty_node_id) = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
return;
}
+ let counterparty_node_id = channel.get().get_counterparty_node_id();
let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height());
let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
// We only send a channel_update in the case where we are just now sending a
if let Some(upd) = channel_update {
channel_state.pending_msg_events.push(upd);
}
- (updates.failed_htlcs, updates.finalized_claimed_htlcs)
+
+ (updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
};
post_handle_chan_restoration!(self, chan_restoration_res);
self.finalize_claims(finalized_claims);
for failure in pending_failures.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
}
}
}
};
for htlc_source in dropped_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
let _ = handle_error!(self, result, *counterparty_node_id);
short_channel_id, channel_outpoint)) =>
{
for failure in pending_failures.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver);
}
self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
self.finalize_claims(finalized_claim_htlcs);
let mut failed_channels = Vec::new();
let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
let has_pending_monitor_events = !pending_monitor_events.is_empty();
- for (funding_outpoint, mut monitor_events) in pending_monitor_events.drain(..) {
+ for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
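+ // `counterparty_node_id` is optional here, presumably because monitors persisted before
+ // it was tracked may not carry it; when present it lets us attribute monitor-driven HTLC
+ // failures to the channel's counterparty below.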
for monitor_event in monitor_events.drain(..) {
match monitor_event {
MonitorEvent::HTLCEvent(htlc_update) => {
self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
},
MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
failure_code, data,
- }));
+ }, HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
}
if let Some(channel_ready) = channel_ready_opt {
send_channel_ready!(short_to_chan_info, pending_msg_events, channel, channel_ready);
if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+
timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
failure_code: 0x4000 | 15,
data: htlc_msat_height_data
- }));
+ }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
false
} else { true }
});
self.handle_init_event_channel_failures(failed_channels);
- for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
+ for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason, destination);
}
}
};
for htlc_source in failed_htlcs.drain(..) {
- channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
+ let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
+ channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver);
}
//TODO: Broadcast channel update for closed channels, but only after we've made a
use ln::msgs::ChannelMessageHandler;
use routing::router::{PaymentParameters, RouteParameters, find_route};
use util::errors::APIError;
- use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
+ use util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use util::test_utils;
use chain::keysinterface::KeysInterface;
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
+ // We have to forward pending HTLCs twice - once tries to forward the payment forward (and
+ // fails), the second will process the resulting failure and fail the HTLC backward
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
use routing::router::{PaymentParameters, Route, RouteHop, RouteParameters, find_route, get_route};
use ln::features::{ChannelFeatures, InitFeatures, InvoiceFeatures, NodeFeatures};
use ln::msgs;
-use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, OptionalField, ErrorAction};
+use ln::msgs::{ChannelMessageHandler, RoutingMessageHandler, ErrorAction};
use util::enforcing_trait_impls::EnforcingSigner;
use util::{byte_utils, test_utils};
-use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason};
+use util::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
use util::errors::APIError;
use util::ser::{Writeable, ReadableArgs};
use util::config::UserConfig;
use prelude::*;
use alloc::collections::BTreeSet;
use core::default::Default;
+use core::iter::repeat;
use sync::{Arc, Mutex};
use ln::functional_test_utils::*;
// Instantiate channel parameters where we push the maximum msats given our
// funding satoshis
let channel_value_sat = 31337; // same as funding satoshis
- let channel_reserve_satoshis = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_sat);
+ let channel_reserve_satoshis = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value_sat, &cfg);
let push_msat = (channel_value_sat - channel_reserve_satoshis) * 1000;
// Have node0 initiate a channel to node1 with aforementioned parameters
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+ let default_config = UserConfig::default();
// Have node0 initiate a channel to node1 with aforementioned parameters
let mut push_amt = 100_000_000;
let feerate_per_kw = 253;
let opt_anchors = false;
push_amt -= feerate_per_kw as u64 * (commitment_tx_base_weight(opt_anchors) + 4 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000 * 1000;
- push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+ push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
let temp_channel_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, if send_from_initiator { 0 } else { push_amt }, 42, None).unwrap();
let mut open_channel_message = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, channel_value, push_sats * 1000, InitFeatures::known(), InitFeatures::known());
let channel_id = chan.2;
let secp_ctx = Secp256k1::new();
- let bs_channel_reserve_sats = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value);
+ let default_config = UserConfig::default();
+ let bs_channel_reserve_sats = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(channel_value, &default_config);
let opt_anchors = false;
fail_payment(&nodes[1], &vec!(&nodes[3], &nodes[2], &nodes[1])[..], payment_hash_2);
claim_payment(&nodes[1], &vec!(&nodes[2], &nodes[3], &nodes[1])[..], payment_preimage_1);
- // Add a duplicate new channel from 2 to 4
- let chan_5 = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known());
-
- // Send some payments across both channels
- let payment_preimage_3 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
- let payment_preimage_4 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
- let payment_preimage_5 = route_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000).0;
-
-
- route_over_limit(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], 3000000);
- let events = nodes[0].node.get_and_clear_pending_msg_events();
- assert_eq!(events.len(), 0);
- nodes[0].logger.assert_log_regex("lightning::ln::channelmanager".to_string(), regex::Regex::new(r"Cannot send value that would put us over the max HTLC value in flight our peer will accept \(\d+\)").unwrap(), 1);
-
- //TODO: Test that routes work again here as we've been notified that the channel is full
-
- claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_3);
- claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_4);
- claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[3])[..], payment_preimage_5);
-
// Close down the channels...
close_channel(&nodes[0], &nodes[1], &chan_1.2, chan_1.3, true);
check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
close_channel(&nodes[1], &nodes[3], &chan_4.2, chan_4.3, false);
check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
- close_channel(&nodes[1], &nodes[3], &chan_5.2, chan_5.3, false);
- check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
- check_closed_event!(nodes[3], 1, ClosureReason::CooperativeClosure);
}
#[test]
// We have to forward pending HTLCs twice - once tries to forward the payment forward (and
// fails), the second will process the resulting failure and fail the HTLC backward.
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let bs_fail_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
+ let default_config = UserConfig::default();
let opt_anchors = false;
let mut push_amt = 100_000_000;
push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
- push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+
+ push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known());
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
+ let default_config = UserConfig::default();
let opt_anchors = false;
// Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
// transaction fee with 0 HTLCs (183 sats)).
let mut push_amt = 100_000_000;
push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
- push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+ push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, push_amt, InitFeatures::known(), InitFeatures::known());
// Send four HTLCs to cover the initial push_msat buffer we're required to include
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
+ let default_config = UserConfig::default();
let opt_anchors = false;
// Set nodes[0]'s balance such that they will consider any above-dust received HTLC to be a
// transaction fee with 0 HTLCs (183 sats)).
let mut push_amt = 100_000_000;
push_amt -= commit_tx_fee_msat(feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT as u64, opt_anchors);
- push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+ push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, push_amt, InitFeatures::known(), InitFeatures::known());
let dust_amt = crate::ln::channel::MIN_CHAN_DUST_LIMIT_SATOSHIS * 1000
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
-
+ let default_config = UserConfig::default();
let opt_anchors = false;
// Set the push_msat amount such that nodes[0] will not be able to afford to add even a single
// During open, we don't have a "counterparty channel reserve" to check against, so that
// requirement only comes into play on the open_channel handling side.
- push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000) * 1000;
+ push_amt -= Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config) * 1000;
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, push_amt, 42, None).unwrap();
let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
open_channel_msg.push_msat += 1;
let _ = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100000, 95000000, InitFeatures::known(), InitFeatures::known());
let channels0 = node_chanmgrs[0].list_channels();
let channels1 = node_chanmgrs[1].list_channels();
+ let default_config = UserConfig::default();
assert_eq!(channels0.len(), 1);
assert_eq!(channels1.len(), 1);
- let reserve = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100000);
+ let reserve = Channel::<EnforcingSigner>::get_holder_selected_channel_reserve_satoshis(100_000, &default_config);
assert_eq!(channels0[0].inbound_capacity_msat, 95000000 - reserve*1000);
assert_eq!(channels1[0].outbound_capacity_msat, 95000000 - reserve*1000);
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
let mut events = nodes[0].node.get_and_clear_pending_events();
expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
- match events[1] {
+ match events.last().unwrap() {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
_ => panic!("Unexpected event"),
}
check_spends!(commitment_tx[0], chan_2.3);
nodes[2].node.fail_htlc_backwards(&payment_hash);
check_added_monitors!(nodes[2], 0);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }]);
check_added_monitors!(nodes[2], 1);
let events = nodes[2].node.get_and_clear_pending_msg_events();
}
}
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
check_added_monitors!(nodes[1], 1);
check_closed_broadcast!(nodes[1], true);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events.len(), 1);
let (_, third_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], value);
nodes[2].node.fail_htlc_backwards(&first_payment_hash);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: first_payment_hash }]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
// Drop the last RAA from 3 -> 2
nodes[2].node.fail_htlc_backwards(&second_payment_hash);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: second_payment_hash }]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
check_added_monitors!(nodes[2], 1);
nodes[2].node.fail_htlc_backwards(&third_payment_hash);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::FailedPayment { payment_hash: third_payment_hash }]);
check_added_monitors!(nodes[2], 1);
let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
assert!(updates.update_add_htlcs.is_empty());
// commitment transaction for nodes[0] until process_pending_htlc_forwards().
check_added_monitors!(nodes[1], 1);
let events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(events.len(), 1);
+ assert_eq!(events.len(), 2);
match events[0] {
Event::PendingHTLCsForwardable { .. } => { },
_ => panic!("Unexpected event"),
};
+ match events[1] {
+ Event::HTLCHandlingFailed { .. } => { },
+ _ => panic!("Unexpected event"),
+ }
// Deliberately don't process the pending fail-back so they all fail back at once after
// block connection just like the !deliver_bs_raa case
}
assert!(ANTI_REORG_DELAY > PAYMENT_EXPIRY_BLOCKS); // We assume payments will also expire
let events = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(events.len(), if deliver_bs_raa { 2 } else { 4 });
+ assert_eq!(events.len(), if deliver_bs_raa { 2 + (nodes.len() - 1) } else { 4 + nodes.len() });
match events[0] {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { },
_ => panic!("Unexepected event"),
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 3);
assert_eq!(node_txn[0], node_txn[1]);
connect_block(&nodes[1], &block);
}
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
check_added_monitors!(nodes[1], 1);
let htlc_timeout_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
connect_blocks(&nodes[1], 1);
if forwarded_htlc {
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
check_added_monitors!(nodes[1], 1);
let fail_commit = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(fail_commit.len(), 1);
check_added_monitors!(nodes[0], 1);
check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
- let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let node_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(node_txn.len(), 1);
check_spends!(node_txn[0], chan.3);
assert_eq!(node_txn[0].output.len(), 2); // We can't force trimming of the to_remote output, as channel_reserve_satoshis blocks us from doing so at channel opening
check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
- let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let revoked_htlc_txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(revoked_htlc_txn.len(), 2);
check_spends!(revoked_htlc_txn[0], chan_1.3);
assert_eq!(revoked_htlc_txn[1].input.len(), 1);
mine_transaction(&nodes[1], &htlc_timeout_tx);
connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
let htlc_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
assert!(htlc_updates.update_add_htlcs.is_empty());
assert_eq!(htlc_updates.update_fail_htlcs.len(), 1);
&[Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone()), Some(config.clone())]);
let nodes = create_network(6, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
- let chan = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
+ let _chan_0_2 = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known());
+ let _chan_1_2 = create_announced_chan_between_nodes(&nodes, 1, 2, InitFeatures::known(), InitFeatures::known());
+ let chan_2_3 = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known());
+ let chan_3_4 = create_announced_chan_between_nodes(&nodes, 3, 4, InitFeatures::known(), InitFeatures::known());
+ let chan_3_5 = create_announced_chan_between_nodes(&nodes, 3, 5, InitFeatures::known(), InitFeatures::known());
// Rebalance and check output sanity...
send_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], 500000);
send_payment(&nodes[1], &[&nodes[2], &nodes[3], &nodes[5]], 500000);
- assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 2);
+ assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 2);
- let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan.2).unwrap().holder_dust_limit_satoshis;
+ let ds_dust_limit = nodes[3].node.channel_state.lock().unwrap().by_id.get(&chan_2_3.2).unwrap().holder_dust_limit_satoshis;
// 0th HTLC:
let (_, payment_hash_1, _) = route_payment(&nodes[0], &[&nodes[2], &nodes[3], &nodes[4]], ds_dust_limit*1000); // not added < dust limit + HTLC tx fee
// 1st HTLC:
// Double-check that six of the new HTLCs were added
// We now have six HTLCs pending over the dust limit and six HTLCs under the dust limit (ie,
// with to_local and to_remote outputs, 8 outputs and 6 HTLCs not included).
- assert_eq!(get_local_commitment_txn!(nodes[3], chan.2).len(), 1);
- assert_eq!(get_local_commitment_txn!(nodes[3], chan.2)[0].output.len(), 8);
+ assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2).len(), 1);
+ assert_eq!(get_local_commitment_txn!(nodes[3], chan_2_3.2)[0].output.len(), 8);
// Now fail back three of the over-dust-limit and three of the under-dust-limit payments in one go.
// Fail 0th below-dust, 4th above-dust, 8th above-dust, 10th below-dust HTLCs
nodes[4].node.fail_htlc_backwards(&payment_hash_5);
nodes[4].node.fail_htlc_backwards(&payment_hash_6);
check_added_monitors!(nodes[4], 0);
- expect_pending_htlcs_forwardable!(nodes[4]);
+
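+ // nodes[4] just failed back four HTLCs (payment_hash_1, _3, _5 and _6), so we expect one
+ // FailedPayment destination per failed HTLC.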
+ let failed_destinations = vec![
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_1 },
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_3 },
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_5 },
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_6 },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[4], failed_destinations);
check_added_monitors!(nodes[4], 1);
let four_removes = get_htlc_update_msgs!(nodes[4], nodes[3].node.get_our_node_id());
nodes[5].node.fail_htlc_backwards(&payment_hash_2);
nodes[5].node.fail_htlc_backwards(&payment_hash_4);
check_added_monitors!(nodes[5], 0);
- expect_pending_htlcs_forwardable!(nodes[5]);
+
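+ // nodes[5] failed back two HTLCs (payment_hash_2 and payment_hash_4), so we expect two
+ // FailedPayment destinations.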
+ let failed_destinations_2 = vec![
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_2 },
+ HTLCDestination::FailedPayment { payment_hash: payment_hash_4 },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[5], failed_destinations_2);
check_added_monitors!(nodes[5], 1);
let two_removes = get_htlc_update_msgs!(nodes[5], nodes[3].node.get_our_node_id());
nodes[3].node.handle_update_fail_htlc(&nodes[5].node.get_our_node_id(), &two_removes.update_fail_htlcs[1]);
commitment_signed_dance!(nodes[3], nodes[5], two_removes.commitment_signed, false);
- let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
-
- expect_pending_htlcs_forwardable!(nodes[3]);
+ let ds_prev_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
+
+ // After 4 and 2 removes respectively above in nodes[4] and nodes[5], nodes[3] should receive 6 HTLCHandlingFailed events
+ let failed_destinations_3 = vec![
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[4].node.get_our_node_id()), channel_id: chan_3_4.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
+ HTLCDestination::NextHopChannel { node_id: Some(nodes[5].node.get_our_node_id()), channel_id: chan_3_5.2 },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations_3);
check_added_monitors!(nodes[3], 1);
let six_removes = get_htlc_update_msgs!(nodes[3], nodes[2].node.get_our_node_id());
nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &six_removes.update_fail_htlcs[0]);
//
// Alternatively, we may broadcast the previous commitment transaction, which should only
// result in failures for the below-dust HTLCs, ie the 0th, 1st, 2nd, 3rd, 9th, and 10th HTLCs.
- let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan.2);
+ let ds_last_commitment_tx = get_local_commitment_txn!(nodes[3], chan_2_3.2);
if announce_latest {
mine_transaction(&nodes[2], &ds_last_commitment_tx[0]);
}
let events = nodes[2].node.get_and_clear_pending_events();
let close_event = if deliver_last_raa {
- assert_eq!(events.len(), 2);
- events[1].clone()
+ assert_eq!(events.len(), 2 + 6);
+ events.last().clone().unwrap()
} else {
assert_eq!(events.len(), 1);
- events[0].clone()
+ events.last().clone().unwrap()
};
match close_event {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
check_closed_broadcast!(nodes[2], true);
if deliver_last_raa {
expect_pending_htlcs_forwardable_from_events!(nodes[2], events[0..1], true);
+
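+ // nodes[2] reports handling failures for the three HTLCs whose next hop was nodes[3] over chan_2_3.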
+ let expected_destinations: Vec<HTLCDestination> = repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(3).collect();
+ expect_htlc_handling_failed_destinations!(nodes[2].node.get_and_clear_pending_events(), expected_destinations);
} else {
- expect_pending_htlcs_forwardable!(nodes[2]);
+ let expected_destinations: Vec<HTLCDestination> = if announce_latest {
+ repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(9).collect()
+ } else {
+ repeat(HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }).take(6).collect()
+ };
+
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], expected_destinations);
}
check_added_monitors!(nodes[2], 3);
let htlc_value = if use_dust { 50000 } else { 3000000 };
let (_, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], htlc_value);
nodes[1].node.fail_htlc_backwards(&our_payment_hash);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
check_added_monitors!(nodes[1], 1);
let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
// nodes[1]'s ChannelManager will now signal that we have HTLC forwards to process.
let process_htlc_forwards_event = nodes[1].node.get_and_clear_pending_events();
- assert_eq!(process_htlc_forwards_event.len(), 1);
+ assert_eq!(process_htlc_forwards_event.len(), 2);
match &process_htlc_forwards_event[0] {
&Event::PendingHTLCsForwardable { .. } => {},
_ => panic!("Unexpected event"),
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs);
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
+ let chan_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1000000, 1000000, InitFeatures::known(), InitFeatures::known());
let (route, our_payment_hash, _, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[2], 100000);
check_added_monitors!(nodes[1], 0);
commitment_signed_dance!(nodes[1], nodes[2], update_msg.1, false, true);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
let events_4 = nodes[1].node.get_and_clear_pending_msg_events();
assert_eq!(events_4.len(), 1);
// Fail one HTLC to prune it in the will-be-latest-local commitment tx
nodes[1].node.fail_htlc_backwards(&payment_hash_2);
check_added_monitors!(nodes[1], 0);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
check_added_monitors!(nodes[1], 1);
let remove = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
// Note that we first have to wait a random delay before processing the receipt of the HTLC,
// and then will wait a second random delay before failing the HTLC back:
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
// Node 3 is expecting payment of 100_000 but received 10_000,
// it should fail htlc like we didn't know the preimage.
check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
connect_blocks(&nodes[1], 49); // Confirm blocks until the HTLC expires (note CLTV was explicitly 50 above)
- let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+ let revoked_htlc_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
assert_eq!(revoked_htlc_txn.len(), 3);
check_spends!(revoked_htlc_txn[1], chan.3);
connect_block(&nodes[0], &Block { header: header_129, txdata: vec![revoked_htlc_txn[0].clone(), revoked_htlc_txn[2].clone()] });
let events = nodes[0].node.get_and_clear_pending_events();
expect_pending_htlcs_forwardable_from_events!(nodes[0], events[0..1], true);
- match events[1] {
+ match events.last().unwrap() {
Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => {}
_ => panic!("Unexpected event"),
}
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).2;
- let mut guard = nodes[0].node.channel_state.lock().unwrap();
- let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer();
+ let per_commitment_secret;
+ let next_per_commitment_point;
+ {
+ let mut guard = nodes[0].node.channel_state.lock().unwrap();
+ let keys = guard.by_id.get_mut(&channel_id).unwrap().get_signer();
- const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
+ const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
- // Make signer believe we got a counterparty signature, so that it allows the revocation
- keys.get_enforcement_state().last_holder_commitment -= 1;
- let per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
+ // Make signer believe we got a counterparty signature, so that it allows the revocation
+ keys.get_enforcement_state().last_holder_commitment -= 1;
+ per_commitment_secret = keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER);
- // Must revoke without gaps
- keys.get_enforcement_state().last_holder_commitment -= 1;
- keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
+ // Must revoke without gaps
+ keys.get_enforcement_state().last_holder_commitment -= 1;
+ keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 1);
- keys.get_enforcement_state().last_holder_commitment -= 1;
- let next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
- &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+ keys.get_enforcement_state().last_holder_commitment -= 1;
+ next_per_commitment_point = PublicKey::from_secret_key(&Secp256k1::new(),
+ &SecretKey::from_slice(&keys.release_commitment_secret(INITIAL_COMMITMENT_NUMBER - 2)).unwrap());
+ }
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(),
&msgs::RevokeAndACK { channel_id, per_commitment_secret, next_per_commitment_point });
let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1000000, 59000000, InitFeatures::known(), InitFeatures::known());
// Lock HTLC in both directions
- let payment_preimage = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000).0;
- route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000).0;
+ let (payment_preimage_1, _, _) = route_payment(&nodes[0], &vec!(&nodes[1])[..], 9_000_000);
+ let (_, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 9_000_000);
let revoked_local_txn = get_local_commitment_txn!(nodes[1], chan.2);
assert_eq!(revoked_local_txn[0].input.len(), 1);
assert_eq!(revoked_local_txn[0].input[0].previous_output.txid, chan.3.txid());
// Revoke local commitment tx
- claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage);
+ claim_payment(&nodes[0], &vec!(&nodes[1])[..], payment_preimage_1);
// Broadcast set of revoked txn on A
connect_blocks(&nodes[0], TEST_FINAL_CLTV + 2 - CHAN_CONFIRM_DEPTH);
- expect_pending_htlcs_forwardable_ignore!(nodes[0]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[0], vec![HTLCDestination::FailedPayment { payment_hash: payment_hash_2 }]);
assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 0);
mine_transaction(&nodes[0], &revoked_local_txn[0]);
// Assert that `node[0]`'s `ChannelUpdate` is capped at 50 percent of the `channel_value`, as
// that's the value of `node[1]`'s `holder_max_htlc_value_in_flight_msat`.
- assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_50_percent_msat));
+ assert_eq!(node_0_chan_update.contents.htlc_maximum_msat, channel_value_50_percent_msat);
// Assert that `node[1]`'s `ChannelUpdate` is capped at 30 percent of the `channel_value`, as
// that's the value of `node[0]`'s `holder_max_htlc_value_in_flight_msat`.
- assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_30_percent_msat));
+ assert_eq!(node_1_chan_update.contents.htlc_maximum_msat, channel_value_30_percent_msat);
// Assert that `node[2]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
// the value of `node[3]`'s `holder_max_htlc_value_in_flight_msat` (100%), exceeds 90% of the
// `channel_value`.
- assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
+ assert_eq!(node_2_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
// Assert that `node[3]`'s `ChannelUpdate` is capped at 90 percent of the `channel_value`, as
// the value of `node[2]`'s `holder_max_htlc_value_in_flight_msat` (95%), exceeds 90% of the
// `channel_value`.
- assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, OptionalField::Present(channel_value_90_percent_msat));
+ assert_eq!(node_3_chan_update.contents.htlc_maximum_msat, channel_value_90_percent_msat);
}
#[test]
// `MessageSendEvent::SendAcceptChannel` event. The message is passed to `nodes[0]`
// `handle_accept_channel`, which is required in order for `create_funding_transaction` to
// succeed when `nodes[0]` is passed to it.
- {
+ let accept_chan_msg = {
let mut lock;
let channel = get_channel_ref!(&nodes[1], lock, temp_channel_id);
- let accept_chan_msg = channel.get_accept_channel_message();
- nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
- }
+ channel.get_accept_channel_message()
+ };
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_chan_msg);
let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42);
// All the below cases should end up being handled exactly identically, so we macro the
// resulting events.
macro_rules! handle_unknown_invalid_payment_data {
- () => {
+ ($payment_hash: expr) => {
check_added_monitors!(nodes[0], 1);
let mut events = nodes[0].node.get_and_clear_pending_msg_events();
let payment_event = SendEvent::from_event(events.pop().unwrap());
// We have to forward pending HTLCs once to process the receipt of the HTLC and then
// again to process the pending backwards-failure of the HTLC
expect_pending_htlcs_forwardable!(nodes[1]);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment{ payment_hash: $payment_hash }]);
check_added_monitors!(nodes[1], 1);
// We should fail the payment back
// Send a payment with the right payment hash but the wrong payment secret
nodes[0].node.send_payment(&route, our_payment_hash, &Some(random_payment_secret)).unwrap();
- handle_unknown_invalid_payment_data!();
+ handle_unknown_invalid_payment_data!(our_payment_hash);
expect_payment_failed!(nodes[0], our_payment_hash, true, expected_error_code, expected_error_data);
// Send a payment with a random payment hash, but the right payment secret
nodes[0].node.send_payment(&route, random_payment_hash, &Some(our_payment_secret)).unwrap();
- handle_unknown_invalid_payment_data!();
+ handle_unknown_invalid_payment_data!(random_payment_hash);
expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
// Send a payment with a random payment hash and random payment secret
nodes[0].node.send_payment(&route, random_payment_hash, &Some(random_payment_secret)).unwrap();
- handle_unknown_invalid_payment_data!();
+ handle_unknown_invalid_payment_data!(random_payment_hash);
expect_payment_failed!(nodes[0], random_payment_hash, true, expected_error_code, expected_error_data);
}
// additional block built on top of the current chain.
nodes[1].chain_monitor.chain_monitor.transactions_confirmed(
&nodes[1].get_block_header(conf_height + 1), &[(0, &spending_txn[1])], conf_height + 1);
- expect_pending_htlcs_forwardable!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: channel_id }]);
check_added_monitors!(nodes[1], 1);
let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
// Now we go fail back the first HTLC from the user end.
nodes[1].node.fail_htlc_backwards(&our_payment_hash);
- expect_pending_htlcs_forwardable_ignore!(nodes[1]);
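+ // Two HTLCs carry this payment hash, so we expect two FailedPayment destinations.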
+ let expected_destinations = vec![
+ HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
+ HTLCDestination::FailedPayment { payment_hash: our_payment_hash },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], expected_destinations);
nodes[1].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[1], 1);
if let Event::PaymentPathFailed { .. } = failure_events[1] {} else { panic!(); }
} else {
// Let the second HTLC fail and claim the first
- expect_pending_htlcs_forwardable_ignore!(nodes[1]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
nodes[1].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[1], 1);
create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
- create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+ let chan_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known());
let payment_params = PaymentParameters::from_node_id(nodes[3].node.get_our_node_id())
.with_features(InvoiceFeatures::known());
}
expect_pending_htlcs_forwardable_ignore!(nodes[3]);
nodes[3].node.process_pending_htlc_forwards();
- expect_pending_htlcs_forwardable_ignore!(nodes[3]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed_ignore!(nodes[3], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
nodes[3].node.process_pending_htlc_forwards();
check_added_monitors!(nodes[3], 1);
nodes[2].node.handle_update_fail_htlc(&nodes[3].node.get_our_node_id(), &fail_updates_1.update_fail_htlcs[0]);
commitment_signed_dance!(nodes[2], nodes[3], fail_updates_1.commitment_signed, false);
- expect_pending_htlcs_forwardable!(nodes[2]);
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[3].node.get_our_node_id()), channel_id: chan_2_3.2 }]);
check_added_monitors!(nodes[2], 1);
let fail_updates_2 = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
connect_blocks(&nodes[3], TEST_FINAL_CLTV);
connect_blocks(&nodes[0], TEST_FINAL_CLTV); // To get the same height for sending later
- expect_pending_htlcs_forwardable!(nodes[3]);
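+ // Both parts of the payment (routed via nodes[1] and via nodes[2]) time out at nodes[3],
+ // giving two FailedPayment destinations with the same payment hash.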
+ let failed_destinations = vec![
+ HTLCDestination::FailedPayment { payment_hash },
+ HTLCDestination::FailedPayment { payment_hash },
+ ];
+ expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[3], failed_destinations);
pass_failed_payment_back(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_hash);