use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
-use bitcoin::hashes::sha256d::Hash as Sha256dHash;
use bitcoin::hash_types::{BlockHash, Txid};
use bitcoin::secp256k1::{SecretKey, PublicKey};
use bitcoin::secp256k1::Secp256k1;
-use bitcoin::secp256k1::ecdh::SharedSecret;
use bitcoin::{LockTime, secp256k1, Sequence};
use crate::chain;
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::InvoiceFeatures;
use crate::routing::gossip::NetworkGraph;
-use crate::routing::router::{DefaultRouter, InFlightHtlcs, PaymentParameters, Route, RouteHop, RoutePath, Router};
+use crate::routing::router::{DefaultRouter, InFlightHtlcs, PaymentParameters, Route, RouteHop, RouteParameters, RoutePath, Router};
use crate::routing::scoring::ProbabilisticScorer;
use crate::ln::msgs;
use crate::ln::onion_utils;
use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
#[cfg(test)]
use crate::ln::outbound_payment;
-use crate::ln::outbound_payment::{OutboundPayments, PendingOutboundPayment};
+use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
use crate::ln::wire::Encode;
-use crate::chain::keysinterface::{EntropySource, KeysInterface, KeysManager, NodeSigner, Recipient, Sign, SignerProvider};
+use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner};
use crate::util::config::{UserConfig, ChannelConfig};
use crate::util::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
use crate::util::events;
use core::{cmp, mem};
use core::cell::RefCell;
use crate::io::Read;
-use crate::sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard, FairRwLock};
+use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
use core::sync::atomic::{AtomicUsize, Ordering};
use core::time::Duration;
use core::ops::Deref;
// Re-export these for use in the public API.
-pub use crate::ln::outbound_payment::PaymentSendFailure;
+pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry};
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
first_hop_htlc_msat: u64,
payment_id: PaymentId,
payment_secret: Option<PaymentSecret>,
+ /// Note that this is now "deprecated" - we write it for forwards (and read it for
+ /// backwards) compatibility reasons, but prefer to use the data in the
+ /// [`super::outbound_payment`] module, which stores per-payment data once instead of in
+ /// each HTLC.
payment_params: Option<PaymentParameters>,
},
}
msg: &'static str,
}
+/// This enum is used to specify which error data to send to peers when failing back an HTLC
+/// using [`ChannelManager::fail_htlc_backwards_with_reason`].
+///
+/// For more info on failure codes, see <https://github.com/lightning/bolts/blob/master/04-onion-routing.md#failure-messages>.
+#[derive(Clone, Copy)]
+pub enum FailureCode {
+ /// We had a temporary error processing the payment. Useful if no other error codes fit
+ /// and you want to indicate that the payer may want to retry.
+ TemporaryNodeFailure = 0x2000 | 2,
+ /// We have a required feature which was not in this onion. For example, you may require
+ /// some additional metadata that was not provided with this payment.
+ RequiredNodeFeatureMissing = 0x4000 | 0x2000 | 3,
+ /// You may wish to use this when a `payment_preimage` is unknown, or the CLTV expiry of
+ /// the HTLC is too close to the current block height for safe handling.
+ /// Using this failure code in [`ChannelManager::fail_htlc_backwards_with_reason`] is
+ /// equivalent to calling [`ChannelManager::fail_htlc_backwards`].
+ IncorrectOrUnknownPaymentDetails = 0x4000 | 15,
+}
+
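A minimal usage sketch (illustrative only, not part of this patch): a receiver rejecting an HTLC whose payment hash it does not recognize, where `channel_manager` and `payment_hash` are assumed to exist in the surrounding application code:

```rust
// Fail an inbound HTLC back with an explicit failure code. Per the docs
// above, `IncorrectOrUnknownPaymentDetails` here behaves the same as calling
// `channel_manager.fail_htlc_backwards(&payment_hash)`.
channel_manager.fail_htlc_backwards_with_reason(
	&payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
```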
type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);
/// Error type returned across the peer_state mutex boundary. When an Err is generated for a
/// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited).
/// This provides some limited amount of privacy. Ideally this would range from somewhere like one
/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
-const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
+pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
/// be sent in the order they appear in the return value, however sometimes the order needs to be
pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
}
-// Note this is only exposed in cfg(test):
-pub(super) struct ChannelHolder {
- /// Messages to send to peers - pushed to in the same lock that they are generated in (except
- /// for broadcast messages, where ordering isn't as strict).
- pub(super) pending_msg_events: Vec<MessageSendEvent>,
-}
-
/// Events which we process internally but cannot be processed immediately at the generation site
/// for some reason. They are handled in timer_tick_occurred, so may be processed with
/// quite some time lag.
}
/// State we hold per-peer.
-pub(super) struct PeerState<Signer: Sign> {
+pub(super) struct PeerState<Signer: ChannelSigner> {
/// `temporary_channel_id` or `channel_id` -> `channel`.
///
/// Holds all channels where the peer is the counterparty. Once a channel has been assigned a
pub(super) channel_by_id: HashMap<[u8; 32], Channel<Signer>>,
/// The latest `InitFeatures` we heard from the peer.
latest_features: InitFeatures,
+ /// Messages to send to the peer - pushed to in the same lock that they are generated in (except
+ /// for broadcast messages, where ordering isn't as strict).
+ pub(super) pending_msg_events: Vec<MessageSendEvent>,
+ /// The peer is currently connected (i.e. we've seen a
+ /// [`ChannelMessageHandler::peer_connected`] and no corresponding
+ /// [`ChannelMessageHandler::peer_disconnected`]).
+ is_connected: bool,
+}
+
+impl<Signer: ChannelSigner> PeerState<Signer> {
+ /// Indicates that a peer meets the criteria where we're ok to remove it from our storage.
+ /// If true is passed for `require_disconnected`, the function will return false if we haven't
+ /// disconnected from the node already, i.e. `PeerState::is_connected` is set to `true`.
+ fn ok_to_remove(&self, require_disconnected: bool) -> bool {
+ if require_disconnected && self.is_connected {
+ return false
+ }
+ self.channel_by_id.is_empty()
+ }
}
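For intuition, a sketch of the intended semantics using three hypothetical `PeerState` values (names assumed for illustration):

```rust
// Only a peer with zero channels is ever removable; `require_disconnected`
// additionally demands that the peer is currently disconnected.
assert!(!connected_peer_no_channels.ok_to_remove(true));
assert!(connected_peer_no_channels.ok_to_remove(false));
assert!(disconnected_peer_no_channels.ok_to_remove(true));
assert!(!peer_with_channels.ok_to_remove(false));
```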
/// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
Arc<M>,
Arc<T>,
Arc<KeysManager>,
+ Arc<KeysManager>,
+ Arc<KeysManager>,
Arc<F>,
Arc<DefaultRouter<
Arc<NetworkGraph<Arc<L>>>,
/// type alias chooses the concrete types of KeysManager and DefaultRouter.
///
/// (C-not exported) as Arcs don't make sense in bindings
-pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
+pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;
/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
// | |
// | |__`pending_intercepted_htlcs`
// |
-// |__`pending_inbound_payments`
-// | |
-// | |__`claimable_payments`
+// |__`per_peer_state`
// | |
-// | |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds
+// | |__`pending_inbound_payments`
// | |
-// | |__`channel_state`
+// | |__`claimable_payments`
+// | |
+// | |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds
// | |
-// | |__`per_peer_state`
+// | |__`peer_state`
// | |
-// | |__`peer_state`
-// | |
-// | |__`id_to_peer`
-// | |
-// | |__`short_to_chan_info`
-// | |
-// | |__`outbound_scid_aliases`
-// | |
-// | |__`best_block`
+// | |__`id_to_peer`
+// | |
+// | |__`short_to_chan_info`
+// | |
+// | |__`outbound_scid_aliases`
+// | |
+// | |__`best_block`
+// | |
+// | |__`pending_events`
// | |
-// | |__`pending_events`
-// | |
-// | |__`pending_background_events`
+// | |__`pending_background_events`
//
-pub struct ChannelManager<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
+pub struct ChannelManager<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
where
- M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
best_block: RwLock<BestBlock>,
secp_ctx: Secp256k1<secp256k1::All>,
- /// See `ChannelManager` struct-level documentation for lock order requirements.
- #[cfg(any(test, feature = "_test_utils"))]
- pub(super) channel_state: Mutex<ChannelHolder>,
- #[cfg(not(any(test, feature = "_test_utils")))]
- channel_state: Mutex<ChannelHolder>,
-
/// Storage for PaymentSecrets and any requirements on future inbound payments before we will
/// expose them to users via a PaymentClaimable event. HTLCs which do not meet the requirements
/// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed
/// the corresponding channel for the event, as we only have access to the `channel_id` during
/// the handling of the events.
///
+ /// Note that no consistency guarantees are made about the existence of a peer with the
+ /// `counterparty_node_id` in our other maps.
+ ///
/// TODO:
/// The `counterparty_node_id` isn't passed with `MonitorEvent`s currently. To pass it, we need
/// to make `counterparty_node_id`'s a required field in `ChannelMonitor`s, which unfortunately
#[cfg(not(test))]
short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,
- our_network_key: SecretKey,
our_network_pubkey: PublicKey,
inbound_payment_key: inbound_payment::ExpandedKey,
/// very far in the past, and can only ever be up to two hours in the future.
highest_seen_timestamp: AtomicUsize,
- /// The bulk of our storage will eventually be here (message queues and the like). Currently
- /// the `per_peer_state` stores our channels on a per-peer basis, as well as the peer's latest
- /// features.
+ /// The bulk of our storage. Currently the `per_peer_state` stores our channels on a per-peer
+ /// basis, as well as the peer's latest features.
///
/// If we are connected to a peer we always at least have an entry here, even if no channels
/// are currently open with that peer.
///
/// See `ChannelManager` struct-level documentation for lock order requirements.
#[cfg(not(any(test, feature = "_test_utils")))]
- per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<K::Target as SignerProvider>::Signer>>>>,
+ per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>>>,
#[cfg(any(test, feature = "_test_utils"))]
- pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<K::Target as SignerProvider>::Signer>>>>,
+ pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>>>,
/// See `ChannelManager` struct-level documentation for lock order requirements.
pending_events: Mutex<Vec<events::Event>>,
persistence_notifier: Notifier,
- keys_manager: K,
+ entropy_source: ES,
+ node_signer: NS,
+ signer_provider: SP,
logger: L,
}
pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
/// Minimum CLTV difference between the current block height and received inbound payments.
-/// Invoices generated for payment to us must set their `min_final_cltv_expiry` field to at least
+/// Invoices generated for payment to us must set their `min_final_cltv_expiry_delta` field to at least
/// this value.
// Note that we fail if exactly HTLC_FAIL_BACK_BUFFER + 1 was used, so we need to add one for
// any payments to succeed. Further, we don't want payments to fail if a block was found while
// a payment was being routed, so we add an extra block to be safe.
-pub const MIN_FINAL_CLTV_EXPIRY: u32 = HTLC_FAIL_BACK_BUFFER + 3;
+pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;
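The arithmetic in the comment above can be made explicit; a sketch, assuming `HTLC_FAIL_BACK_BUFFER` is the `u32` constant cast in the definition:

```rust
// +1 because exactly HTLC_FAIL_BACK_BUFFER + 1 is failed, +1 so a payment can
// actually succeed, and +1 of headroom for a block found while routing.
const _: () = assert!(MIN_FINAL_CLTV_EXPIRY_DELTA as u32 == HTLC_FAIL_BACK_BUFFER + 3);
```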
// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
// ie that if the next-hop peer fails the HTLC within
}
}
+/// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
+/// These include payments that have yet to find a successful path, or have unresolved HTLCs.
+#[derive(Debug, PartialEq)]
+pub enum RecentPaymentDetails {
+ /// When a payment is still being sent and awaiting successful delivery.
+ Pending {
+ /// Hash of the payment that is currently being sent but has yet to be fulfilled or
+ /// abandoned.
+ payment_hash: PaymentHash,
+ /// Total amount (in msat, excluding fees) across all paths for this payment,
+ /// not just the amount currently inflight.
+ total_msat: u64,
+ },
+ /// When a pending payment is fulfilled, we continue tracking it until all pending HTLCs have
+ /// been resolved. Upon receiving [`Event::PaymentSent`], we delay for a few minutes before the
+ /// payment is removed from tracking.
+ Fulfilled {
+ /// Hash of the payment that was claimed. `None` for serializations of [`ChannelManager`]
+ /// made before LDK version 0.0.104.
+ payment_hash: Option<PaymentHash>,
+ },
+ /// After a payment's retries are exhausted per the provided [`Retry`], or it is explicitly
+ /// abandoned via [`ChannelManager::abandon_payment`], it is marked as abandoned until all
+ /// pending HTLCs for this payment resolve and an [`Event::PaymentFailed`] is generated.
+ Abandoned {
+ /// Hash of the payment that we have given up trying to send.
+ payment_hash: PaymentHash,
+ },
+}
+
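As a sketch of consuming this enum, a hypothetical `details: RecentPaymentDetails` (e.g. drawn from [`ChannelManager::list_recent_payments`]) could be rendered for a UI:

```rust
let status = match details {
	RecentPaymentDetails::Pending { payment_hash, total_msat } =>
		format!("pending: {} msat for {:?}", total_msat, payment_hash),
	RecentPaymentDetails::Fulfilled { payment_hash } =>
		format!("fulfilled: {:?}", payment_hash),
	RecentPaymentDetails::Abandoned { payment_hash } =>
		format!("abandoned: {:?}", payment_hash),
};
```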
/// Route hints used in constructing invoices for [phantom node payments].
///
/// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
match $internal {
Ok(msg) => Ok(msg),
Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
- #[cfg(debug_assertions)]
- {
- // In testing, ensure there are no deadlocks where the lock is already held upon
- // entering the macro.
- assert!($self.channel_state.try_lock().is_ok());
- assert!($self.pending_events.try_lock().is_ok());
- }
+ // In testing, ensure there are no deadlocks where the lock is already held upon
+ // entering the macro.
+ debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
+ debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
let mut msg_events = Vec::with_capacity(2);
}
if !msg_events.is_empty() {
- $self.channel_state.lock().unwrap().pending_msg_events.append(&mut msg_events);
+ let per_peer_state = $self.per_peer_state.read().unwrap();
+ if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
+ let mut peer_state = peer_state_mutex.lock().unwrap();
+ peer_state.pending_msg_events.append(&mut msg_events);
+ }
}
// Return error in case higher-API need one
}
}
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
/// Users need to notify the new ChannelManager when a new block is connected or
/// disconnected using its `block_connected` and `block_disconnected` methods, starting
/// from after `params.latest_hash`.
- pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, keys_manager: K, config: UserConfig, params: ChainParameters) -> Self {
+ pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters) -> Self {
let mut secp_ctx = Secp256k1::new();
- secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
- let inbound_pmt_key_material = keys_manager.get_inbound_payment_key_material();
+ secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
+ let inbound_pmt_key_material = node_signer.get_inbound_payment_key_material();
let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
ChannelManager {
default_configuration: config.clone(),
best_block: RwLock::new(params.best_block),
- channel_state: Mutex::new(ChannelHolder{
- pending_msg_events: Vec::new(),
- }),
outbound_scid_aliases: Mutex::new(HashSet::new()),
pending_inbound_payments: Mutex::new(HashMap::new()),
pending_outbound_payments: OutboundPayments::new(),
id_to_peer: Mutex::new(HashMap::new()),
short_to_chan_info: FairRwLock::new(HashMap::new()),
- our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(),
- our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()),
+ our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
secp_ctx,
inbound_payment_key: expanded_inbound_key,
- fake_scid_rand_bytes: keys_manager.get_secure_random_bytes(),
+ fake_scid_rand_bytes: entropy_source.get_secure_random_bytes(),
- probing_cookie_secret: keys_manager.get_secure_random_bytes(),
+ probing_cookie_secret: entropy_source.get_secure_random_bytes(),
highest_seen_timestamp: AtomicUsize::new(0),
total_consistency_lock: RwLock::new(()),
persistence_notifier: Notifier::new(),
- keys_manager,
+ entropy_source,
+ node_signer,
+ signer_provider,
logger,
}
if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias
outbound_scid_alias += 1;
} else {
- outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
+ outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
}
if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
break;
// We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
debug_assert!(&self.total_consistency_lock.try_write().is_err());
- let mut channel_state = self.channel_state.lock().unwrap();
let per_peer_state = self.per_peer_state.read().unwrap();
- match per_peer_state.get(&their_network_key) {
- None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
- Some(peer_state_mutex) => {
- let mut peer_state = peer_state_mutex.lock().unwrap();
- let channel = {
- let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
- let their_features = &peer_state.latest_features;
- let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
- match Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key,
- their_features, channel_value_satoshis, push_msat, user_channel_id, config,
- self.best_block.read().unwrap().height(), outbound_scid_alias)
- {
- Ok(res) => res,
- Err(e) => {
- self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
- return Err(e);
- },
- }
- };
- let res = channel.get_open_channel(self.genesis_hash.clone());
+ let peer_state_mutex = per_peer_state.get(&their_network_key)
+ .ok_or_else(|| APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) })?;
+
+ let mut peer_state = peer_state_mutex.lock().unwrap();
+ let channel = {
+ let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
+ let their_features = &peer_state.latest_features;
+ let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
+ match Channel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
+ their_features, channel_value_satoshis, push_msat, user_channel_id, config,
+ self.best_block.read().unwrap().height(), outbound_scid_alias)
+ {
+ Ok(res) => res,
+ Err(e) => {
+ self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
+ return Err(e);
+ },
+ }
+ };
+ let res = channel.get_open_channel(self.genesis_hash.clone());
- let temporary_channel_id = channel.channel_id();
- match peer_state.channel_by_id.entry(temporary_channel_id) {
- hash_map::Entry::Occupied(_) => {
- if cfg!(fuzzing) {
- return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
- } else {
- panic!("RNG is bad???");
- }
- },
- hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
+ let temporary_channel_id = channel.channel_id();
+ match peer_state.channel_by_id.entry(temporary_channel_id) {
+ hash_map::Entry::Occupied(_) => {
+ if cfg!(fuzzing) {
+ return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
+ } else {
+ panic!("RNG is bad???");
}
-
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
- node_id: their_network_key,
- msg: res,
- });
- Ok(temporary_channel_id)
},
+ hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
}
+
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+ node_id: their_network_key,
+ msg: res,
+ });
+ Ok(temporary_channel_id)
}
- fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<K::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
- let mut res = Vec::new();
+ fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
// Allocate our best estimate of the number of channels we have in the `res`
// Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
// a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
// of the ChannelMonitor handling. Therefore reallocations may still occur, but they are
// unlikely as the `short_to_chan_info` map often contains 2 entries for
// the same channel.
- res.reserve(self.short_to_chan_info.read().unwrap().len());
+ let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
{
let best_block_height = self.best_block.read().unwrap().height();
let per_peer_state = self.per_peer_state.read().unwrap();
self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
}
+ /// Returns, in an undefined order, recent payments that -- if not fulfilled -- have yet to
+ /// find a successful path, or have unresolved HTLCs.
+ ///
+ /// This can be useful for payments that may have been prepared, but ultimately not sent, as a
+ /// result of a crash. If such a payment exists, is not listed here, and an
+ /// [`Event::PaymentSent`] has not been received, you may consider resending the payment.
+ ///
+ /// [`Event::PaymentSent`]: events::Event::PaymentSent
+ pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
+ self.pending_outbound_payments.pending_outbound_payments.lock().unwrap().iter()
+ .filter_map(|(_, pending_outbound_payment)| match pending_outbound_payment {
+ PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => {
+ Some(RecentPaymentDetails::Pending {
+ payment_hash: *payment_hash,
+ total_msat: *total_msat,
+ })
+ },
+ PendingOutboundPayment::Abandoned { payment_hash, .. } => {
+ Some(RecentPaymentDetails::Abandoned { payment_hash: *payment_hash })
+ },
+ PendingOutboundPayment::Fulfilled { payment_hash, .. } => {
+ Some(RecentPaymentDetails::Fulfilled { payment_hash: *payment_hash })
+ },
+ PendingOutboundPayment::Legacy { .. } => None
+ })
+ .collect()
+ }
+
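A sketch of the crash-recovery pattern the docs above describe, assuming the application persisted a `prepared_payment_hash` before calling `send_payment` (all bindings here are assumptions):

```rust
// If the prepared payment is not tracked and no Event::PaymentSent was seen,
// the caller may consider resending it.
let tracked = channel_manager.list_recent_payments().iter().any(|p| match p {
	RecentPaymentDetails::Pending { payment_hash, .. } |
	RecentPaymentDetails::Abandoned { payment_hash } => *payment_hash == prepared_payment_hash,
	RecentPaymentDetails::Fulfilled { payment_hash } => *payment_hash == Some(prepared_payment_hash),
});
if !tracked { /* consider resending the prepared payment */ }
```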
/// Helper function that issues the channel close events
- fn issue_channel_close_events(&self, channel: &Channel<<K::Target as SignerProvider>::Signer>, closure_reason: ClosureReason) {
+ fn issue_channel_close_events(&self, channel: &Channel<<SP::Target as SignerProvider>::Signer>, closure_reason: ClosureReason) {
let mut pending_events_lock = self.pending_events.lock().unwrap();
match channel.unbroadcasted_funding() {
Some(transaction) => {
let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
let result: Result<(), _> = loop {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(channel_id.clone()) {
- hash_map::Entry::Occupied(mut chan_entry) => {
- if *counterparty_node_id != chan_entry.get().get_counterparty_node_id(){
- return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
- }
- let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.keys_manager, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?;
- failed_htlcs = htlcs;
-
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update {
- let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
- let (result, is_permanent) =
- handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
- if is_permanent {
- remove_channel!(self, chan_entry);
- break result;
- }
+
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(channel_id.clone()) {
+ hash_map::Entry::Occupied(mut chan_entry) => {
+ let (shutdown_msg, monitor_update, htlcs) = chan_entry.get_mut().get_shutdown(&self.signer_provider, &peer_state.latest_features, target_feerate_sats_per_1000_weight)?;
+ failed_htlcs = htlcs;
+
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update {
+ let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
+ let (result, is_permanent) =
+ handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+ if is_permanent {
+ remove_channel!(self, chan_entry);
+ break result;
}
+ }
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg: shutdown_msg
- });
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg: shutdown_msg
+ });
- if chan_entry.get().is_shutdown() {
- let channel = remove_channel!(self, chan_entry);
- if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: channel_update
- });
- }
- self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
+ if chan_entry.get().is_shutdown() {
+ let channel = remove_channel!(self, chan_entry);
+ if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: channel_update
+ });
}
- break Ok(());
- },
- hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() })
- }
- } else {
- return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) });
+ self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
+ }
+ break Ok(());
+ },
+ hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), counterparty_node_id) })
}
};
// force-closing. The monitor update on the required in-memory copy should broadcast
// the latest local state, which is the best we can do anyway. Thus, it is safe to
// ignore the result here.
- let _ = self.chain_monitor.update_channel(funding_txo, monitor_update);
+ let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
}
}
/// user closes, which will be re-exposed as the `ChannelClosed` reason.
fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
-> Result<PublicKey, APIError> {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex = per_peer_state.get(peer_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
let mut chan = {
- let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(peer_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
- if chan.get().get_counterparty_node_id() != *peer_node_id {
- return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
- }
- if let Some(peer_msg) = peer_msg {
- self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
- } else {
- self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
- }
- remove_channel!(self, chan)
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
+ if let Some(peer_msg) = peer_msg {
+ self.issue_channel_close_events(chan.get(),ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
} else {
- return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
+ self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
}
+ remove_channel!(self, chan)
} else {
- return Err(APIError::APIMisuseError{ err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", peer_node_id) });
+ return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
}
};
log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
self.finish_force_close_channel(chan.force_shutdown(broadcast));
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- let mut channel_state = self.channel_state.lock().unwrap();
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut peer_state = peer_state_mutex.lock().unwrap();
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
Ok(counterparty_node_id) => {
- self.channel_state.lock().unwrap().pending_msg_events.push(
- events::MessageSendEvent::HandleError {
- node_id: counterparty_node_id,
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
- },
- }
- );
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
+ let mut peer_state = peer_state_mutex.lock().unwrap();
+ peer_state.pending_msg_events.push(
+ events::MessageSendEvent::HandleError {
+ node_id: counterparty_node_id,
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
+ },
+ }
+ );
+ }
Ok(())
},
Err(e) => Err(e)
// final_expiry_too_soon
// We have to have some headroom to broadcast on chain if we have the preimage, so make sure
// we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
+ //
// Also, ensure that, in the case of an unknown preimage for the received payment hash, our
// payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
// channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
}
- let shared_secret = SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key).secret_bytes();
+ let shared_secret = self.node_signer.ecdh(
+ Recipient::Node, &msg.onion_routing_packet.public_key.unwrap(), None
+ ).unwrap().secret_bytes();
if msg.onion_routing_packet.version != 0 {
//TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
// short_channel_id is non-0 in any ::Forward.
if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
if let Some((err, mut code, chan_update)) = loop {
- let id_option = self.short_to_chan_info.read().unwrap().get(&short_channel_id).cloned();
+ let id_option = self.short_to_chan_info.read().unwrap().get(short_channel_id).cloned();
let forwarding_chan_info_opt = match id_option {
None => { // unknown_next_peer
// Note that this is likely a timing oracle for detecting whether an scid is a
};
let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
let per_peer_state = self.per_peer_state.read().unwrap();
- if let None = per_peer_state.get(&counterparty_node_id) {
+ let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+ if peer_state_mutex_opt.is_none() {
break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
}
- let peer_state_mutex = per_peer_state.get(&counterparty_node_id).unwrap();
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
None => {
/// public, and thus should be called whenever the result is going to be passed out in a
/// [`MessageSendEvent::BroadcastChannelUpdate`] event.
///
- /// May be called with peer_state already locked!
- fn get_channel_update_for_broadcast(&self, chan: &Channel<<K::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+ /// Note that in `internal_closing_signed`, this function is called without the `peer_state`
+ /// corresponding to the channel's counterparty locked, as the channel has been removed from
+ /// the storage and the `peer_state` lock has been dropped.
+ fn get_channel_update_for_broadcast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
if !chan.should_announce() {
return Err(LightningError {
err: "Cannot broadcast a channel_update for a private channel".to_owned(),
/// is public (only returning an Err if the channel does not yet have an assigned short_id),
/// and thus MUST NOT be called unless the recipient of the resulting message has already
/// provided evidence that they know about the existence of the channel.
- /// May be called with peer_state already locked!
- fn get_channel_update_for_unicast(&self, chan: &Channel<<K::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+ ///
+ /// Note that through `internal_closing_signed`, this function is called without the
+ /// `peer_state` corresponding to the channel's counterparty locked, as the channel has been
+ /// removed from the storage and the `peer_state` lock has been dropped.
+ fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
self.get_channel_update_for_onion(short_channel_id, chan)
}
- fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<K::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
+ fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
- let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
+ let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
let unsigned = msgs::UnsignedChannelUpdate {
chain_hash: self.genesis_hash,
fee_proportional_millionths: chan.get_fee_proportional_millionths(),
excess_data: Vec::new(),
};
-
- let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
- let sig = self.secp_ctx.sign_ecdsa(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
+ // Panic on failure to signal that LDK should be restarted to retry signing the
+ // `ChannelUpdate`. If we returned an error and the `node_signer` cannot provide a signature
+ // for whatever reason, we wouldn't be able to receive inbound payments through the
+ // corresponding channel.
+ let sig = self.node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelUpdate(&unsigned)).unwrap();
Ok(msgs::ChannelUpdate {
signature: sig,
// Only public for testing, this should otherwise never be called directly
pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
- let prng_seed = self.keys_manager.get_secure_random_bytes();
+ let prng_seed = self.entropy_source.get_secure_random_bytes();
let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
};
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_lock;
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
- match {
- if !chan.get().is_live() {
- return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
+ let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
+ .ok_or_else(|| APIError::InvalidRoute{err: "No peer matching the path's first hop found!" })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
+ match {
+ if !chan.get().is_live() {
+ return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
+ }
+ break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
+ htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
+ path: path.clone(),
+ session_priv: session_priv.clone(),
+ first_hop_htlc_msat: htlc_msat,
+ payment_id,
+ payment_secret: payment_secret.clone(),
+ payment_params: payment_params.clone(),
+ }, onion_packet, &self.logger),
+ chan)
+ } {
+ Some((update_add, commitment_signed, monitor_update)) => {
+ let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update);
+ let chan_id = chan.get().channel_id();
+ match (update_err,
+ handle_monitor_update_res!(self, update_err, chan,
+ RAACommitmentOrder::CommitmentFirst, false, true))
+ {
+ (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
+ (ChannelMonitorUpdateStatus::Completed, Ok(())) => {},
+ (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
+ // Note that MonitorUpdateInProgress here indicates (per function
+ // docs) that we will resend the commitment update once monitor
+ // updating completes. Therefore, we must return an error
+ // indicating that it is unsafe to retry the payment wholesale,
+ // which we do in the send_payment check for
+ // MonitorUpdateInProgress, below.
+ return Err(APIError::MonitorUpdateInProgress);
+ },
+ _ => unreachable!(),
}
- break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
- htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
- path: path.clone(),
- session_priv: session_priv.clone(),
- first_hop_htlc_msat: htlc_msat,
- payment_id,
- payment_secret: payment_secret.clone(),
- payment_params: payment_params.clone(),
- }, onion_packet, &self.logger),
- chan)
- } {
- Some((update_add, commitment_signed, monitor_update)) => {
- let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
- let chan_id = chan.get().channel_id();
- match (update_err,
- handle_monitor_update_res!(self, update_err, chan,
- RAACommitmentOrder::CommitmentFirst, false, true))
- {
- (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e),
- (ChannelMonitorUpdateStatus::Completed, Ok(())) => {},
- (ChannelMonitorUpdateStatus::InProgress, Err(_)) => {
- // Note that MonitorUpdateInProgress here indicates (per function
- // docs) that we will resend the commitment update once monitor
- // updating completes. Therefore, we must return an error
- // indicating that it is unsafe to retry the payment wholesale,
- // which we do in the send_payment check for
- // MonitorUpdateInProgress, below.
- return Err(APIError::MonitorUpdateInProgress);
- },
- _ => unreachable!(),
- }
- log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: path.first().unwrap().pubkey,
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: vec![update_add],
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed,
- },
- });
- },
- None => { },
- }
- } else {
- // The channel was likely removed after we fetched the id from the
- // `short_to_chan_info` map, but before we successfully locked the
- // `channel_by_id` map.
- // This can occur as no consistency guarantees exists between the two maps.
- return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
+ log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id));
+ peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: path.first().unwrap().pubkey,
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: vec![update_add],
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed,
+ },
+ });
+ },
+ None => { },
}
- } else { return Err(APIError::InvalidRoute{err: "No peer matching the path's first hop found!" })}
+ } else {
+ // The channel was likely removed after we fetched the id from the
+ // `short_to_chan_info` map, but before we successfully locked the
+ // `channel_by_id` map.
+ // This can occur as no consistency guarantees exists between the two maps.
+ return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
+ }
return Ok(());
};
/// Sends a payment along a given route.
///
- /// Value parameters are provided via the last hop in route, see documentation for RouteHop
+ /// Value parameters are provided via the last hop in route, see documentation for [`RouteHop`]
/// fields for more info.
///
+ /// May generate SendHTLCs message(s) event on success, which should be relayed (e.g. via
+ /// [`PeerManager::process_events`]).
+ ///
+ /// # Avoiding Duplicate Payments
+ ///
/// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this
/// method will error with an [`APIError::InvalidRoute`]. Note, however, that once a payment
/// is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of an
- /// [`Event::PaymentSent`]) LDK will not stop you from sending a second payment with the same
- /// [`PaymentId`].
+ /// [`Event::PaymentSent`] or [`Event::PaymentFailed`]) LDK will not stop you from sending a
+ /// second payment with the same [`PaymentId`].
///
/// Thus, in order to ensure duplicate payments are not sent, you should implement your own
/// tracking of payments, including state to indicate once a payment has completed. Because you
/// consider using the [`PaymentHash`] as the key for tracking payments. In that case, the
/// [`PaymentId`] should be a copy of the [`PaymentHash`] bytes.
///
- /// May generate SendHTLCs message(s) event on success, which should be relayed (e.g. via
- /// [`PeerManager::process_events`]).
+ /// Additionally, in the scenario where we begin the process of sending a payment, but crash
+ /// before `send_payment` returns (or prior to [`ChannelMonitorUpdate`] persistence if you're
+ /// using [`ChannelMonitorUpdateStatus::InProgress`]), the payment may be lost on restart. See
+ /// [`ChannelManager::list_recent_payments`] for more information.
+ ///
+ /// # Possible Error States on [`PaymentSendFailure`]
///
/// Each path may have a different return value, and [`PaymentSendFailure`] may contain a `Vec` with
/// each entry matching the corresponding-index entry in the route paths, see
- /// PaymentSendFailure for more info.
+ /// [`PaymentSendFailure`] for more info.
///
/// In general, a path may raise:
/// * [`APIError::InvalidRoute`] when an invalid route or forwarding parameter (cltv_delta, fee,
/// irrevocably committed to on our end. In such a case, do NOT retry the payment with a
/// different route unless you intend to pay twice!
///
- /// payment_secret is unrelated to payment_hash (or PaymentPreimage) and exists to authenticate
- /// the sender to the recipient and prevent payment-probing (deanonymization) attacks. For
- /// newer nodes, it will be provided to you in the invoice. If you do not have one, the Route
- /// must not contain multiple paths as multi-path payments require a recipient-provided
- /// payment_secret.
+ /// # A caution on `payment_secret`
+ ///
+ /// `payment_secret` is unrelated to `payment_hash` (or [`PaymentPreimage`]) and exists to
+ /// authenticate the sender to the recipient and prevent payment-probing (deanonymization)
+ /// attacks. For newer nodes, it will be provided to you in the invoice. If you do not have one,
+ /// the [`Route`] must not contain multiple paths as multi-path payments require a
+ /// recipient-provided `payment_secret`.
///
- /// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature
- /// bit set (either as required or as available). If multiple paths are present in the Route,
- /// we assume the invoice had the basic_mpp feature set.
+ /// If a `payment_secret` *is* provided, we assume that the invoice had the payment_secret
+ /// feature bit set (either as required or as available). If multiple paths are present in the
+ /// [`Route`], we assume the invoice had the basic_mpp feature set.
///
/// [`Event::PaymentSent`]: events::Event::PaymentSent
+ /// [`Event::PaymentFailed`]: events::Event::PaymentFailed
/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
+ /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
self.pending_outbound_payments
- .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.keys_manager, best_block_height,
+ .send_payment_with_route(route, payment_hash, payment_secret, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
+ |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ }
+
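A sketch of the duplicate-avoidance guidance above, reusing the payment hash bytes as the [`PaymentId`] (`route`, `payment_hash`, and `payment_secret` are assumed caller state):

```rust
// Deduplicate sends per invoice by deriving the id from the payment hash.
let payment_id = PaymentId(payment_hash.0);
match channel_manager.send_payment(&route, payment_hash, &Some(payment_secret), payment_id) {
	Ok(()) => { /* wait for Event::PaymentSent or Event::PaymentFailed */ },
	Err(_e) => { /* consult the PaymentSendFailure docs before any retry */ },
}
```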
+ /// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on
+ /// `route_params` and retry failed payment paths based on `retry_strategy`.
+ pub fn send_payment_with_retry(&self, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), PaymentSendFailure> {
+ let best_block_height = self.best_block.read().unwrap().height();
+ self.pending_outbound_payments
+ .send_payment(payment_hash, payment_secret, payment_id, retry_strategy, route_params,
+ &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
+ &self.entropy_source, &self.node_signer, best_block_height, &self.logger,
|path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
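A sketch of invoking the retrying variant; `payee_pubkey`, the amounts, and the payment fields are assumed inputs, and the `RouteParameters` field names reflect this era of the API:

```rust
let route_params = RouteParameters {
	payment_params: PaymentParameters::from_node_id(payee_pubkey),
	final_value_msat: 10_000,
	final_cltv_expiry_delta: 144,
};
// LDK finds the route itself and retries failed paths up to three times.
channel_manager.send_payment_with_retry(payment_hash, &Some(payment_secret),
	PaymentId(payment_hash.0), route_params, Retry::Attempts(3))?;
```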
#[cfg(test)]
fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.keys_manager, best_block_height,
+ self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, payment_secret, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
|path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
#[cfg(test)]
pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, payment_secret: Option<PaymentSecret>, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, payment_secret, payment_id, route, &self.keys_manager, best_block_height)
+ self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, payment_secret, payment_id, route, None, &self.entropy_source, best_block_height)
}
- /// Retries a payment along the given [`Route`].
- ///
- /// Errors returned are a superset of those returned from [`send_payment`], so see
- /// [`send_payment`] documentation for more details on errors. This method will also error if the
- /// retry amount puts the payment more than 10% over the payment's total amount, if the payment
- /// for the given `payment_id` cannot be found (likely due to timeout or success), or if
- /// further retries have been disabled with [`abandon_payment`].
- ///
- /// [`send_payment`]: [`ChannelManager::send_payment`]
- /// [`abandon_payment`]: [`ChannelManager::abandon_payment`]
- pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
- let best_block_height = self.best_block.read().unwrap().height();
- self.pending_outbound_payments.retry_payment_with_route(route, payment_id, &self.keys_manager, best_block_height,
- |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
- self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
- }
-
- /// Signals that no further retries for the given payment will occur.
+ /// Signals that no further retries for the given payment should occur. Useful if you have a
+ /// pending outbound payment with retries remaining, but wish to stop retrying the payment before
+ /// retries are exhausted.
///
- /// After this method returns, no future calls to [`retry_payment`] for the given `payment_id`
- /// are allowed. If no [`Event::PaymentFailed`] event had been generated before, one will be
- /// generated as soon as there are no remaining pending HTLCs for this payment.
+ /// If no [`Event::PaymentFailed`] event had been generated before, one will be generated as soon
+ /// as there are no remaining pending HTLCs for this payment.
///
/// Note that calling this method does *not* prevent a payment from succeeding. You must still
/// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
/// determine the ultimate status of a payment.
///
/// If an [`Event::PaymentFailed`] event is generated and we restart without this
- /// [`ChannelManager`] having been persisted, the payment may still be in the pending state
- /// upon restart. This allows further calls to [`retry_payment`] (and requiring a second call
- /// to [`abandon_payment`] to mark the payment as failed again). Otherwise, future calls to
- /// [`retry_payment`] will fail with [`PaymentSendFailure::ParameterError`].
+ /// [`ChannelManager`] having been persisted, another [`Event::PaymentFailed`] may be generated.
///
- /// [`abandon_payment`]: Self::abandon_payment
- /// [`retry_payment`]: Self::retry_payment
/// [`Event::PaymentFailed`]: events::Event::PaymentFailed
/// [`Event::PaymentSent`]: events::Event::PaymentSent
pub fn abandon_payment(&self, payment_id: PaymentId) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- if let Some(payment_failed_ev) = self.pending_outbound_payments.abandon_payment(payment_id) {
- self.pending_events.lock().unwrap().push(payment_failed_ev);
- }
+ self.pending_outbound_payments.abandon_payment(payment_id, &self.pending_events);
}
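A sketch of the intended flow, with a hypothetical `payment_id` and application-side event handling:

```rust
// Stop retrying; the payment is only safely treated as failed once the
// corresponding event fires.
channel_manager.abandon_payment(payment_id);
// ...later, in the application's event handler:
// Event::PaymentFailed { payment_id, .. } => mark_payment_failed(payment_id),
// Event::PaymentSent { .. } => { /* abandoning did not prevent success */ },
```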
/// Send a spontaneous payment, which is a payment that does not require the recipient to have
/// [`send_payment`]: Self::send_payment
pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- self.pending_outbound_payments.send_spontaneous_payment(route, payment_preimage, payment_id, &self.keys_manager, best_block_height,
+ self.pending_outbound_payments.send_spontaneous_payment_with_route(
+ route, payment_preimage, payment_id, &self.entropy_source, &self.node_signer,
+ best_block_height,
+ |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
+ }
+
+ /// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route
+ /// based on `route_params` and retry failed payment paths based on `retry_strategy`.
+ ///
+ /// See [`PaymentParameters::for_keysend`] for help in constructing `route_params` for spontaneous
+ /// payments.
+ ///
+ /// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
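+ ///
+ /// A minimal sketch; `channel_manager` and the `recipient` node id are assumed to come
+ /// from the surrounding application and the values are illustrative, so the example is
+ /// not compiled:
+ ///
+ /// ```ignore
+ /// let route_params = RouteParameters {
+ ///     payment_params: PaymentParameters::for_keysend(recipient),
+ ///     final_value_msat: 10_000,
+ ///     final_cltv_expiry_delta: 40,
+ /// };
+ /// // Passing `None` draws a fresh preimage from our entropy source; failed paths are
+ /// // retried up to three times.
+ /// let payment_hash = channel_manager.send_spontaneous_payment_with_retry(
+ ///     None, PaymentId([42; 32]), route_params, Retry::Attempts(3))?;
+ /// ```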
+ pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, PaymentSendFailure> {
+ let best_block_height = self.best_block.read().unwrap().height();
+ self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, payment_id,
+ retry_strategy, route_params, &self.router, self.list_usable_channels(),
+ || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
+ &self.logger,
|path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// us to easily discern them from real payments.
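+ ///
+ /// A minimal sketch; `channel_manager` and the `hops` of a route found by a prior
+ /// routing call are assumed to come from the surrounding application, so the example
+ /// is not compiled:
+ ///
+ /// ```ignore
+ /// // The probe's outcome is surfaced later as `Event::ProbeSuccessful` or
+ /// // `Event::ProbeFailed`.
+ /// let (probe_payment_hash, probe_payment_id) = channel_manager.send_probe(hops)?;
+ /// ```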
pub fn send_probe(&self, hops: Vec<RouteHop>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
let best_block_height = self.best_block.read().unwrap().height();
- self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.keys_manager, best_block_height,
+ self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height,
|path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv))
}
/// Handles the generation of a funding transaction, optionally (for tests) with a function
/// which checks the correctness of the funding transaction given the associated channel.
- fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<<K::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
+ fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<<SP::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
) -> Result<(), APIError> {
- let mut channel_state = self.channel_state.lock().unwrap();
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- let (chan, msg) = {
- let (res, chan) = {
- match peer_state.channel_by_id.remove(temporary_channel_id) {
- Some(mut chan) => {
- let funding_txo = find_funding_output(&chan, &funding_transaction)?;
-
- (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
- .map_err(|e| if let ChannelError::Close(msg) = e {
- MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
- } else { unreachable!(); })
- , chan)
- },
- None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) },
- }
- };
- match handle_error!(self, res, chan.get_counterparty_node_id()) {
- Ok(funding_msg) => {
- (chan, funding_msg)
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ let (chan, msg) = {
+ let (res, chan) = {
+ match peer_state.channel_by_id.remove(temporary_channel_id) {
+ Some(mut chan) => {
+ let funding_txo = find_funding_output(&chan, &funding_transaction)?;
+
+ (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
+ .map_err(|e| if let ChannelError::Close(msg) = e {
+ MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
+ } else { unreachable!(); })
+ , chan)
},
- Err(_) => { return Err(APIError::ChannelUnavailable {
- err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
- }) },
+ None => { return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) }) },
}
};
-
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
- node_id: chan.get_counterparty_node_id(),
- msg,
- });
- mem::drop(channel_state);
- match peer_state.channel_by_id.entry(chan.channel_id()) {
- hash_map::Entry::Occupied(_) => {
- panic!("Generated duplicate funding txid?");
+ match handle_error!(self, res, chan.get_counterparty_node_id()) {
+ Ok(funding_msg) => {
+ (chan, funding_msg)
},
- hash_map::Entry::Vacant(e) => {
- let mut id_to_peer = self.id_to_peer.lock().unwrap();
- if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
- panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
- }
- e.insert(chan);
+ Err(_) => { return Err(APIError::ChannelUnavailable {
+ err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
+ }) },
+ }
+ };
+
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
+ node_id: chan.get_counterparty_node_id(),
+ msg,
+ });
+ match peer_state.channel_by_id.entry(chan.channel_id()) {
+ hash_map::Entry::Occupied(_) => {
+ panic!("Generated duplicate funding txid?");
+ },
+ hash_map::Entry::Vacant(e) => {
+ let mut id_to_peer = self.id_to_peer.lock().unwrap();
+ if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
+ panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
}
+ e.insert(chan);
}
- Ok(())
- } else {
- return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) })
}
+ Ok(())
}
#[cfg(test)]
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(
&self.total_consistency_lock, &self.persistence_notifier,
);
- {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- for channel_id in channel_ids {
- if !peer_state.channel_by_id.contains_key(channel_id) {
- return Err(APIError::ChannelUnavailable {
- err: format!("Channel with ID {} was not found", log_bytes!(*channel_id)),
- });
- }
- }
- for channel_id in channel_ids {
- let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
- if !channel.update_config(config) {
- continue;
- }
- if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
- } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
- node_id: channel.get_counterparty_node_id(),
- msg,
- });
- }
- }
- } else {
- return Err(APIError::APIMisuseError{ err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) });
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ for channel_id in channel_ids {
+ if !peer_state.channel_by_id.contains_key(channel_id) {
+ return Err(APIError::ChannelUnavailable {
+ err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id),
+ });
+ }
+ }
+ for channel_id in channel_ids {
+ let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
+ if !channel.update_config(config) {
+ continue;
+ }
+ if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
+ } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
+ node_id: channel.get_counterparty_node_id(),
+ msg,
+ });
}
}
Ok(())
let next_hop_scid = {
let peer_state_lock = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = peer_state_lock.get(&next_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.get(next_hop_channel_id) {
- Some(chan) => {
- if !chan.is_usable() {
- return Err(APIError::ChannelUnavailable {
- err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
- })
- }
- chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
- },
- None => return Err(APIError::ChannelUnavailable {
- err: format!("Channel with id {} not found", log_bytes!(*next_hop_channel_id))
- })
- }
- } else {
- return Err(APIError::APIMisuseError{ err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", next_node_id) });
+ let peer_state_mutex = peer_state_lock.get(&next_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.get(next_hop_channel_id) {
+ Some(chan) => {
+ if !chan.is_usable() {
+ return Err(APIError::ChannelUnavailable {
+ err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
+ })
+ }
+ chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
+ },
+ None => return Err(APIError::ChannelUnavailable {
+ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id)
+ })
}
};
}
}
if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
- let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
- if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) {
- let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
+ let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
+ if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) {
+ let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
Ok(res) => res,
Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
}
};
let per_peer_state = self.per_peer_state.read().unwrap();
- if let None = per_peer_state.get(&counterparty_node_id) {
+ let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+ if peer_state_mutex_opt.is_none() {
forwarding_channel_not_found!();
continue;
}
- let peer_state_mutex = per_peer_state.get(&counterparty_node_id).unwrap();
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(forward_chan_id) {
hash_map::Entry::Vacant(_) => {
let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
let mut receiver_node_id = self.our_network_pubkey;
if phantom_shared_secret.is_some() {
- receiver_node_id = self.keys_manager.get_node_id(Recipient::PhantomNode)
+ receiver_node_id = self.node_signer.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
}
match claimable_htlc.onion_payload {
OnionPayload::Invoice { .. } => {
let payment_data = payment_data.unwrap();
- let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
- Ok(payment_preimage) => payment_preimage,
+ let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
+ Ok(result) => result,
Err(()) => {
+ log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", log_bytes!(payment_hash.0));
fail_htlc!(claimable_htlc, payment_hash);
continue
}
};
+ if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
+ let expected_min_expiry_height = (self.current_best_block().height() + min_final_cltv_expiry_delta as u32) as u64;
+ if (cltv_expiry as u64) < expected_min_expiry_height {
+ log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
+ log_bytes!(payment_hash.0), cltv_expiry, expected_min_expiry_height);
+ fail_htlc!(claimable_htlc, payment_hash);
+ continue;
+ }
+ }
check_total_value!(payment_data, payment_preimage);
},
OnionPayload::Spontaneous(preimage) => {
}
}
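+ // With the forwards processed, kick off any payment retries that have become due:
+ // `check_retry_payments` re-routes via our `Router` and re-sends along the
+ // resulting paths.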
+ let best_block_height = self.best_block.read().unwrap().height();
+ self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
+ || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
+ &self.pending_events, &self.logger,
+ |path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv|
+ self.send_payment_along_path(path, payment_params, payment_hash, payment_secret, total_value, cur_height, payment_id, keysend_preimage, session_priv));
+
for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
}
BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
// The channel has already been closed, so no use bothering to care about the
// monitor updating completing.
- let _ = self.chain_monitor.update_channel(funding_txo, update);
+ let _ = self.chain_monitor.update_channel(funding_txo, &update);
},
}
}
self.process_background_events();
}
- fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<K::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
+ fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<SP::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
if !chan.is_outbound() { return NotifyOption::SkipPersist; }
// If the feerate has decreased by less than half, don't bother
if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
/// the channel.
/// * Expiring a channel's previous `ChannelConfig` if necessary to only allow forwarding HTLCs
/// with the current `ChannelConfig`.
+ /// * Removing peers which have disconnected and no longer have any channels.
///
/// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
/// estimate fetches.
let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
let mut timed_out_mpp_htlcs = Vec::new();
+ let mut pending_peers_awaiting_removal = Vec::new();
{
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- let pending_msg_events = &mut channel_state.pending_msg_events;
let per_peer_state = self.per_peer_state.read().unwrap();
for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ let pending_msg_events = &mut peer_state.pending_msg_events;
+ let counterparty_node_id = *counterparty_node_id;
peer_state.channel_by_id.retain(|chan_id, chan| {
let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
if let Err(e) = chan.timer_check_closing_negotiation_progress() {
let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id);
- handle_errors.push((Err(err), *counterparty_node_id));
+ handle_errors.push((Err(err), counterparty_node_id));
if needs_close { return false; }
}
true
});
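+ // Collect disconnected peers with no remaining channels; they are removed below
+ // under the `per_peer_state` write lock (see the comment there).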
+ if peer_state.ok_to_remove(true) {
+ pending_peers_awaiting_removal.push(counterparty_node_id);
+ }
+ }
+ }
+
+ // When a peer disconnects but still has channels, the peer's `peer_state` entry in the
+ // `per_peer_state` is not removed by the `peer_disconnected` function. If the channels
+ // to that peer are later closed while the peer is still disconnected (i.e. force
+ // closed), we therefore need to remove the peer from `peer_state` separately.
+ // To avoid having to take the `per_peer_state` `write` lock in the channel-close path,
+ // we instead remove such peers here on a timer, limiting the negative effects on
+ // parallelism as much as possible.
+ if !pending_peers_awaiting_removal.is_empty() {
+ let mut per_peer_state = self.per_peer_state.write().unwrap();
+ for counterparty_node_id in pending_peers_awaiting_removal {
+ match per_peer_state.entry(counterparty_node_id) {
+ hash_map::Entry::Occupied(entry) => {
+ // Remove the entry if the peer is still disconnected and we still
+ // have no channels to the peer.
+ let remove_entry = {
+ let peer_state = entry.get().lock().unwrap();
+ peer_state.ok_to_remove(true)
+ };
+ if remove_entry {
+ entry.remove_entry();
+ }
+ },
+ hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
+ }
}
}
/// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
/// startup during which time claims that were in-progress at shutdown may be replayed.
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
+ self.fail_htlc_backwards_with_reason(payment_hash, &FailureCode::IncorrectOrUnknownPaymentDetails);
+ }
+
+ /// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the
+ /// reason for the failure.
+ ///
+ /// See [`FailureCode`] for valid failure codes.
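+ ///
+ /// A minimal sketch; `channel_manager` and `payment_hash` are assumed to come from the
+ /// surrounding application, so the example is not compiled:
+ ///
+ /// ```ignore
+ /// // Fail the inbound payment back while hinting that the sender may retry.
+ /// channel_manager.fail_htlc_backwards_with_reason(
+ ///     &payment_hash, &FailureCode::TemporaryNodeFailure);
+ /// ```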
+ pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: &FailureCode) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash);
if let Some((_, mut sources)) = removed_source {
for htlc in sources.drain(..) {
- let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
- htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
+ let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc);
let source = HTLCSource::PreviousHopData(htlc.prev_hop);
- let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data);
let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash };
self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
}
}
}
- /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
- /// that we want to return and a channel.
- ///
+ /// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`].
+ fn get_htlc_fail_reason_from_failure_code(&self, failure_code: &FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
+ match failure_code {
+ FailureCode::TemporaryNodeFailure | FailureCode::RequiredNodeFeatureMissing =>
+ HTLCFailReason::from_failure_code(*failure_code as u16),
+ FailureCode::IncorrectOrUnknownPaymentDetails => {
+ let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
+ htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
+ HTLCFailReason::reason(*failure_code as u16, htlc_msat_height_data)
+ }
+ }
+ }
+
+ /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
+ /// that we want to return and a channel.
+ ///
/// This is for failures on the channel on which the HTLC was *received*, not failures in
/// forwarding it onward.
- fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<<K::Target as SignerProvider>::Signer>) -> (u16, Vec<u8>) {
+ fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> (u16, Vec<u8>) {
// We can't be sure what SCID was used when relaying inbound towards us, so we have to
// guess somewhat. If its a public channel, we figure best to just use the real SCID (as
// we're not leaking that we have a channel with the counterparty), otherwise we try to use
/// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
/// that we want to return and a channel.
- fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<<K::Target as SignerProvider>::Signer>) -> (u16, Vec<u8>) {
+ fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> (u16, Vec<u8>) {
debug_assert_eq!(desired_err_code & 0x1000, 0x1000);
if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
/// Fails an HTLC backwards to the sender of it to us.
/// Note that we do not assume that channels corresponding to failed HTLCs are still available.
fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
- #[cfg(debug_assertions)]
- {
- // Ensure that the `channel_state` and no peer state channel storage lock is not held
- // when calling this function.
- // This ensures that future code doesn't introduce a lock_order requirement for
- // `forward_htlcs` to be locked after the `channel_state` and `per_peer_state` locks,
- // which calling this function with the locks aquired would.
- assert!(self.channel_state.try_lock().is_ok());
- assert!(self.per_peer_state.try_write().is_ok());
+ // Ensure that no peer state channel storage lock is held when calling this function.
+ // This ensures that future code doesn't introduce a lock-order requirement for
+ // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which calling
+ // this function while holding any `per_peer_state` peer lock would introduce.
+ for (_, peer) in self.per_peer_state.read().unwrap().iter() {
+ debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
}
//TODO: There is a timing attack here where if a node fails an HTLC back to us they can
let mut receiver_node_id = self.our_network_pubkey;
for htlc in sources.iter() {
if htlc.prev_hop.phantom_shared_secret.is_some() {
- let phantom_pubkey = self.keys_manager.get_node_id(Recipient::PhantomNode)
+ let phantom_pubkey = self.node_signer.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
receiver_node_id = phantom_pubkey;
break;
let mut expected_amt_msat = None;
let mut valid_mpp = true;
let mut errs = Vec::new();
- let mut channel_state = Some(self.channel_state.lock().unwrap());
- let mut per_peer_state = Some(self.per_peer_state.read().unwrap());
+ let per_peer_state = self.per_peer_state.read().unwrap();
for htlc in sources.iter() {
let (counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&htlc.prev_hop.short_channel_id) {
Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
}
};
- if let None = per_peer_state.as_ref().unwrap().get(&counterparty_node_id) {
+ let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+ if peer_state_mutex_opt.is_none() {
valid_mpp = false;
break;
}
- let peer_state_mutex = per_peer_state.as_ref().unwrap().get(&counterparty_node_id).unwrap();
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
let peer_state = &mut *peer_state_lock;
- if let None = peer_state.channel_by_id.get(&chan_id) {
+ if peer_state.channel_by_id.get(&chan_id).is_none() {
valid_mpp = false;
break;
}
claimable_amt_msat += htlc.value;
}
+ mem::drop(per_peer_state);
if sources.is_empty() || expected_amt_msat.is_none() {
- mem::drop(channel_state);
- mem::drop(per_peer_state);
self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
return;
}
if claimable_amt_msat != expected_amt_msat.unwrap() {
- mem::drop(channel_state);
- mem::drop(per_peer_state);
self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
expected_amt_msat.unwrap(), claimable_amt_msat);
}
if valid_mpp {
for htlc in sources.drain(..) {
- if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
- if per_peer_state.is_none() { per_peer_state = Some(self.per_peer_state.read().unwrap()); }
- if let Err((pk, err)) = self.claim_funds_from_hop(channel_state.take().unwrap(), per_peer_state.take().unwrap(),
+ if let Err((pk, err)) = self.claim_funds_from_hop(
htlc.prev_hop, payment_preimage,
|_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
{
}
}
}
- mem::drop(channel_state);
- mem::drop(per_peer_state);
if !valid_mpp {
for htlc in sources.drain(..) {
let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
}
fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
- mut channel_state_lock: MutexGuard<ChannelHolder>,
- per_peer_state_lock: RwLockReadGuard<HashMap<PublicKey, Mutex<PeerState<<K::Target as SignerProvider>::Signer>>>>,
prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
-> Result<(), (PublicKey, MsgHandleErrInternal)> {
//TODO: Delay the claimed_funds relaying just like we do outbound relay!
+ let per_peer_state = self.per_peer_state.read().unwrap();
let chan_id = prev_hop.outpoint.to_channel_id();
- let channel_state = &mut *channel_state_lock;
let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
None => None
};
- let (found_channel, mut peer_state_opt) = if counterparty_node_id_opt.is_some() && per_peer_state_lock.get(&counterparty_node_id_opt.unwrap()).is_some() {
- let peer_mutex = per_peer_state_lock.get(&counterparty_node_id_opt.unwrap()).unwrap();
- let peer_state = peer_mutex.lock().unwrap();
- let found_channel = peer_state.channel_by_id.contains_key(&chan_id);
- (found_channel, Some(peer_state))
- } else { (false, None) };
-
- if found_channel {
- if let hash_map::Entry::Occupied(mut chan) = peer_state_opt.as_mut().unwrap().channel_by_id.entry(chan_id) {
- let counterparty_node_id = chan.get().get_counterparty_node_id();
- match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
- Ok(msgs_monitor_option) => {
- if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
- match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
- "Failed to update channel monitor with preimage {:?}: {:?}",
- payment_preimage, e);
- let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err();
- mem::drop(channel_state_lock);
- mem::drop(peer_state_opt);
- mem::drop(per_peer_state_lock);
- self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
- return Err((counterparty_node_id, err));
- }
- }
- if let Some((msg, commitment_signed)) = msgs {
- log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
- log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: chan.get().get_counterparty_node_id(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: vec![msg],
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed,
- }
- });
- }
- mem::drop(channel_state_lock);
- mem::drop(peer_state_opt);
- mem::drop(per_peer_state_lock);
- self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
- Ok(())
- } else {
- Ok(())
- }
- },
- Err((e, monitor_update)) => {
- match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
+ let mut peer_state_opt = counterparty_node_id_opt.as_ref().map(
+ |counterparty_node_id| per_peer_state.get(counterparty_node_id).map(
+ |peer_mutex| peer_mutex.lock().unwrap()
+ )
+ ).unwrap_or(None);
+
+ if let Some(hash_map::Entry::Occupied(mut chan)) = peer_state_opt.as_mut().map(|peer_state| peer_state.channel_by_id.entry(chan_id))
+ {
+ let counterparty_node_id = chan.get().get_counterparty_node_id();
+ match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
+ Ok(msgs_monitor_option) => {
+ if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
+ match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) {
ChannelMonitorUpdateStatus::Completed => {},
e => {
- // TODO: This needs to be handled somehow - if we receive a monitor update
- // with a preimage we *must* somehow manage to propagate it to the upstream
- // channel, or we must have an ability to receive the same update and try
- // again on restart.
- log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
- "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+ log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug },
+ "Failed to update channel monitor with preimage {:?}: {:?}",
payment_preimage, e);
- },
+ let err = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err();
+ mem::drop(peer_state_opt);
+ mem::drop(per_peer_state);
+ self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
+ return Err((counterparty_node_id, err));
+ }
}
- let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
- if drop {
- chan.remove_entry();
+ if let Some((msg, commitment_signed)) = msgs {
+ log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
+ log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
+ peer_state_opt.as_mut().unwrap().pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: counterparty_node_id,
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: vec![msg],
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed,
+ }
+ });
}
- mem::drop(channel_state_lock);
mem::drop(peer_state_opt);
- mem::drop(per_peer_state_lock);
- self.handle_monitor_update_completion_actions(completion_action(None));
- Err((counterparty_node_id, res))
- },
- }
- } else {
- // We've held the peer_state mutex since finding the channel and setting
- // found_channel to true, so the channel can't have been dropped.
- unreachable!()
+ mem::drop(per_peer_state);
+ self.handle_monitor_update_completion_actions(completion_action(Some(htlc_value_msat)));
+ Ok(())
+ } else {
+ Ok(())
+ }
+ },
+ Err((e, monitor_update)) => {
+ match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ e => {
+ // TODO: This needs to be handled somehow - if we receive a monitor update
+ // with a preimage we *must* somehow manage to propagate it to the upstream
+ // channel, or we must have an ability to receive the same update and try
+ // again on restart.
+ log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info },
+ "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
+ payment_preimage, e);
+ },
+ }
+ let (drop, res) = convert_chan_err!(self, e, chan.get_mut(), &chan_id);
+ if drop {
+ chan.remove_entry();
+ }
+ mem::drop(peer_state_opt);
+ mem::drop(per_peer_state);
+ self.handle_monitor_update_completion_actions(completion_action(None));
+ Err((counterparty_node_id, res))
+ },
}
} else {
let preimage_update = ChannelMonitorUpdate {
};
// We update the ChannelMonitor on the backward link, after
// receiving an `update_fulfill_htlc` from the forward link.
- let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, preimage_update);
+ let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
if update_res != ChannelMonitorUpdateStatus::Completed {
// TODO: This needs to be handled somehow - if we receive a monitor update
// with a preimage we *must* somehow manage to propagate it to the upstream
log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
payment_preimage, update_res);
}
- mem::drop(channel_state_lock);
mem::drop(peer_state_opt);
- mem::drop(per_peer_state_lock);
+ mem::drop(per_peer_state);
// Note that we do process the completion action here. This totally could be a
// duplicate claim, but we have no way of knowing without interrogating the
// `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
}
- fn claim_funds_internal(&self, channel_state_lock: MutexGuard<ChannelHolder>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
+ fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
- mem::drop(channel_state_lock);
self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger);
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
- let res = self.claim_funds_from_hop(channel_state_lock, self.per_peer_state.read().unwrap(), hop_data, payment_preimage,
+ let res = self.claim_funds_from_hop(hop_data, payment_preimage,
|htlc_claim_value_msat| {
if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
/// Handles a channel reentering a functional state, either due to reconnect or a monitor
/// update completion.
fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
- channel: &mut Channel<<K::Target as SignerProvider>::Signer>, raa: Option<msgs::RevokeAndACK>,
+ channel: &mut Channel<<SP::Target as SignerProvider>::Signer>, raa: Option<msgs::RevokeAndACK>,
commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
let htlc_forwards;
let (mut pending_failures, finalized_claims, counterparty_node_id) = {
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_lock;
let counterparty_node_id = match counterparty_node_id {
Some(cp_id) => cp_id.clone(),
None => {
};
let per_peer_state = self.per_peer_state.read().unwrap();
let mut peer_state_lock;
+ let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
+ if peer_state_mutex_opt.is_none() { return }
+ peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
let mut channel = {
- if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
- peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
- hash_map::Entry::Occupied(chan) => chan,
- hash_map::Entry::Vacant(_) => return,
- }
- } else { return }
+ match peer_state.channel_by_id.entry(funding_txo.to_channel_id()){
+ hash_map::Entry::Occupied(chan) => chan,
+ hash_map::Entry::Vacant(_) => return,
+ }
};
if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
return;
}
- let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height());
+ let updates = channel.get_mut().monitor_updating_restored(&self.logger, &self.node_signer, self.genesis_hash, &self.default_configuration, self.best_block.read().unwrap().height());
let channel_update = if updates.channel_ready.is_some() && channel.get().is_usable() {
// We only send a channel_update in the case where we are just now sending a
// channel_ready and the channel is in a usable state. We may re-send a
})
} else { None }
} else { None };
- htlc_forwards = self.handle_channel_resumption(&mut channel_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
+ htlc_forwards = self.handle_channel_resumption(&mut peer_state.pending_msg_events, channel.get_mut(), updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
if let Some(upd) = channel_update {
- channel_state.pending_msg_events.push(upd);
+ peer_state.pending_msg_events.push(upd);
}
(updates.failed_htlcs, updates.finalized_claimed_htlcs, counterparty_node_id)
fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(temporary_channel_id.clone()) {
- hash_map::Entry::Occupied(mut channel) => {
- if !channel.get().inbound_is_awaiting_accept() {
- return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
- }
- if *counterparty_node_id != channel.get().get_counterparty_node_id() {
- return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
- }
- if accept_0conf {
- channel.get_mut().set_0conf();
- } else if channel.get().get_channel_type().requires_zero_conf() {
- let send_msg_err_event = events::MessageSendEvent::HandleError {
- node_id: channel.get().get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage{
- msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
- }
- };
- channel_state.pending_msg_events.push(send_msg_err_event);
- let _ = remove_channel!(self, channel);
- return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
- }
-
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
- node_id: channel.get().get_counterparty_node_id(),
- msg: channel.get_mut().accept_inbound_channel(user_channel_id),
- });
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(temporary_channel_id.clone()) {
+ hash_map::Entry::Occupied(mut channel) => {
+ if !channel.get().inbound_is_awaiting_accept() {
+ return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
}
- hash_map::Entry::Vacant(_) => {
- return Err(APIError::ChannelUnavailable { err: "Can't accept a channel that doesn't exist".to_owned() });
+ if accept_0conf {
+ channel.get_mut().set_0conf();
+ } else if channel.get().get_channel_type().requires_zero_conf() {
+ let send_msg_err_event = events::MessageSendEvent::HandleError {
+ node_id: channel.get().get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage{
+ msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
+ }
+ };
+ peer_state.pending_msg_events.push(send_msg_err_event);
+ let _ = remove_channel!(self, channel);
+ return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
}
+
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+ node_id: channel.get().get_counterparty_node_id(),
+ msg: channel.get_mut().accept_inbound_channel(user_channel_id),
+ });
+ }
+ hash_map::Entry::Vacant(_) => {
+ return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) });
}
- } else {
- return Err(APIError::APIMisuseError { err: format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id) });
}
Ok(())
}
- fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
+ fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
if msg.chain_hash != self.genesis_hash {
return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
}
}
let mut random_bytes = [0u8; 16];
- random_bytes.copy_from_slice(&self.keys_manager.get_secure_random_bytes()[..16]);
+ random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
let user_channel_id = u128::from_be_bytes(random_bytes);
let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
- let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.keys_manager,
- counterparty_node_id.clone(), &their_features, msg, user_channel_id, &self.default_configuration,
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
+ counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id, &self.default_configuration,
self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias)
{
Err(e) => {
},
Ok(res) => res
};
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(channel.channel_id()) {
- hash_map::Entry::Occupied(_) => {
- self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
- return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
- },
- hash_map::Entry::Vacant(entry) => {
- if !self.default_configuration.manually_accept_inbound_channels {
- if channel.get_channel_type().requires_zero_conf() {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
- }
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
- node_id: counterparty_node_id.clone(),
- msg: channel.accept_inbound_channel(user_channel_id),
- });
- } else {
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(
- events::Event::OpenChannelRequest {
- temporary_channel_id: msg.temporary_channel_id.clone(),
- counterparty_node_id: counterparty_node_id.clone(),
- funding_satoshis: msg.funding_satoshis,
- push_msat: msg.push_msat,
- channel_type: channel.get_channel_type().clone(),
- }
- );
+ match peer_state.channel_by_id.entry(channel.channel_id()) {
+ hash_map::Entry::Occupied(_) => {
+ self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
+ },
+ hash_map::Entry::Vacant(entry) => {
+ if !self.default_configuration.manually_accept_inbound_channels {
+ if channel.get_channel_type().requires_zero_conf() {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
}
-
- entry.insert(channel);
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
+ node_id: counterparty_node_id.clone(),
+ msg: channel.accept_inbound_channel(user_channel_id),
+ });
+ } else {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(
+ events::Event::OpenChannelRequest {
+ temporary_channel_id: msg.temporary_channel_id.clone(),
+ counterparty_node_id: counterparty_node_id.clone(),
+ funding_satoshis: msg.funding_satoshis,
+ push_msat: msg.push_msat,
+ channel_type: channel.get_channel_type().clone(),
+ }
+ );
}
+
+ entry.insert(channel);
}
- } else {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id.clone()))
}
Ok(())
}
- fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
+ fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
let (value, output_script, user_id) = {
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
- }
- try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &their_features), chan);
- (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
- }
- } else {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
+ (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
}
};
let mut pending_events = self.pending_events.lock().unwrap();
}
fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
+ })?;
let ((funding_msg, monitor, mut channel_ready), mut chan) = {
let best_block = *self.best_block.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
- }
- (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.keys_manager, &self.logger), chan), chan.remove())
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
- }
- } else {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove())
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
}
};
// Because we have exclusive ownership of the channel here we can release the peer_state
// It's safe to unwrap as we've held the `per_peer_state` read lock since checking that the
// peer exists, despite the inner PeerState potentially having no channels after removing
// the channel above.
- let peer_state_mutex = per_peer_state.get(counterparty_node_id).unwrap();
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(funding_msg.channel_id) {
i_e.insert(chan.get_counterparty_node_id());
}
}
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
node_id: counterparty_node_id.clone(),
msg: funding_msg,
});
if let Some(msg) = channel_ready {
- send_channel_ready!(self, channel_state.pending_msg_events, chan, msg);
+ send_channel_ready!(self, peer_state.pending_msg_events, chan, msg);
}
e.insert(chan);
}
fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
let funding_tx = {
let best_block = *self.best_block.read().unwrap();
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_lock;
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
- let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.keys_manager, &self.logger) {
- Ok(update) => update,
- Err(e) => try_chan_entry!(self, Err(e), chan),
- };
- match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
- ChannelMonitorUpdateStatus::Completed => {},
- e => {
- let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
- if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
- // We weren't able to watch the channel to begin with, so no updates should be made on
- // it. Previously, full_stack_target found an (unreachable) panic when the
- // monitor update contained within `shutdown_finish` was applied.
- if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
- shutdown_finish.0.take();
- }
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger) {
+ Ok(update) => update,
+ Err(e) => try_chan_entry!(self, Err(e), chan),
+ };
+ match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
+ ChannelMonitorUpdateStatus::Completed => {},
+ e => {
+ let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
+ if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+ // We weren't able to watch the channel to begin with, so no updates should be made on
+ // it. Previously, full_stack_target found an (unreachable) panic when the
+ // monitor update contained within `shutdown_finish` was applied.
+ if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+ shutdown_finish.0.take();
}
- return res
- },
- }
- if let Some(msg) = channel_ready {
- send_channel_ready!(self, channel_state.pending_msg_events, chan.get(), msg);
- }
- funding_tx
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
- } else {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ }
+ return res
+ },
+ }
+ if let Some(msg) = channel_ready {
+ send_channel_ready!(self, peer_state.pending_msg_events, chan.get(), msg);
+ }
+ funding_tx
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid());
}
fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
- let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, self.get_our_node_id(),
- self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), chan);
- if let Some(announcement_sigs) = announcement_sigs_opt {
- log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer,
+ self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan);
+ if let Some(announcement_sigs) = announcement_sigs_opt {
+ log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: counterparty_node_id.clone(),
+ msg: announcement_sigs,
+ });
+ } else if chan.get().is_usable() {
+ // If we're sending an announcement_signatures, we'll send the (public)
+ // channel_update after sending a channel_announcement when we receive our
+ // counterparty's announcement_signatures. Thus, we only bother to send a
+ // channel_update here if the channel is not public, i.e. we're not sending an
+ // announcement_signatures.
+ log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
+ if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
node_id: counterparty_node_id.clone(),
- msg: announcement_sigs,
+ msg,
});
- } else if chan.get().is_usable() {
- // If we're sending an announcement_signatures, we'll send the (public)
- // channel_update after sending a channel_announcement when we receive our
- // counterparty's announcement_signatures. Thus, we only bother to send a
- // channel_update here if the channel is not public, i.e. we're not sending an
- // announcement_signatures.
- log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
- if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
}
+ }
- emit_channel_ready_event!(self, chan.get_mut());
+ emit_channel_ready_event!(self, chan.get_mut());
- Ok(())
- },
- hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
- } else {
- Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ Ok(())
+ },
+ hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
- fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
+ fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
let result: Result<(), _> = loop {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
- hash_map::Entry::Occupied(mut chan_entry) => {
- if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+ hash_map::Entry::Occupied(mut chan_entry) => {
- if !chan_entry.get().received_shutdown() {
- log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
- log_bytes!(msg.channel_id),
- if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
- }
+ if !chan_entry.get().received_shutdown() {
+ log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
+ log_bytes!(msg.channel_id),
+ if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
+ }
- let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), chan_entry);
- dropped_htlcs = htlcs;
-
- // Update the monitor with the shutdown script if necessary.
- if let Some(monitor_update) = monitor_update {
- let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update);
- let (result, is_permanent) =
- handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
- if is_permanent {
- remove_channel!(self, chan_entry);
- break result;
- }
+ let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
+ dropped_htlcs = htlcs;
+
+ // Update the monitor with the shutdown script if necessary.
+ if let Some(monitor_update) = monitor_update {
+ let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
+ let (result, is_permanent) =
+ handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
+ if is_permanent {
+ remove_channel!(self, chan_entry);
+ break result;
}
+ }
- if let Some(msg) = shutdown {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: *counterparty_node_id,
- msg,
- });
- }
+ if let Some(msg) = shutdown {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ }
- break Ok(());
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
- } else {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ break Ok(());
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
for htlc_source in dropped_htlcs.drain(..) {
}
fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
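+		// As with the other peer-message handlers, we look up the peer's state up front. A
+		// missing entry should be unreachable for a message delivered for a connected peer,
+		// so we debug_assert to surface bugs in tests while returning a graceful error in
+		// release builds.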
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
let (tx, chan_option) = {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
- hash_map::Entry::Occupied(mut chan_entry) => {
- if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
- let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
- if let Some(msg) = closing_signed {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
- if tx.is_some() {
- // We're done with this channel, we've got a signed closing transaction and
- // will send the closing_signed back to the remote peer upon return. This
- // also implies there are no pending HTLCs left on the channel, so we can
- // fully delete it from tracking (the channel monitor is still around to
- // watch for old state broadcasts)!
- (tx, Some(remove_channel!(self, chan_entry)))
- } else { (tx, None) }
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
- } else {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
+ hash_map::Entry::Occupied(mut chan_entry) => {
+ let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
+ if let Some(msg) = closing_signed {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
+ node_id: counterparty_node_id.clone(),
+ msg,
+ });
+ }
+ if tx.is_some() {
+ // We're done with this channel, we've got a signed closing transaction and
+ // will send the closing_signed back to the remote peer upon return. This
+ // also implies there are no pending HTLCs left on the channel, so we can
+ // fully delete it from tracking (the channel monitor is still around to
+ // watch for old state broadcasts)!
+ (tx, Some(remove_channel!(self, chan_entry)))
+ } else { (tx, None) }
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
if let Some(broadcast_tx) = tx {
}
if let Some(chan) = chan_option {
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
- let mut channel_state = self.channel_state.lock().unwrap();
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
msg: update
});
}
let pending_forward_info = self.decode_update_add_htlc_onion(msg);
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+
+ let create_pending_htlc_status = |chan: &Channel<<SP::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
+ // If the update_add is completely bogus, the call will Err and we will close,
+ // but if we've sent a shutdown and they haven't acknowledged it yet, we just
+ // want to reject the new HTLC and fail it backwards instead of forwarding.
+ match pending_forward_info {
+ PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
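+							// Per BOLT 4, failure codes with the UPDATE flag (0x1000) set must
+							// carry an up-to-date channel_update for the failing channel, so we
+							// expand such codes into the real code plus serialized update data
+							// before encrypting the failure packet.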
+ let reason = if (error_code & 0x1000) != 0 {
+ let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
+ HTLCFailReason::reason(real_code, error_data)
+ } else {
+ HTLCFailReason::from_failure_code(error_code)
+ }.get_encrypted_failure_packet(incoming_shared_secret, &None);
+ let msg = msgs::UpdateFailHTLC {
+ channel_id: msg.channel_id,
+ htlc_id: msg.htlc_id,
+ reason
+ };
+ PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
+ },
+ _ => pending_forward_info
}
-
- let create_pending_htlc_status = |chan: &Channel<<K::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
- // If the update_add is completely bogus, the call will Err and we will close,
- // but if we've sent a shutdown and they haven't acknowledged it yet, we just
- // want to reject the new HTLC and fail it backwards instead of forwarding.
- match pending_forward_info {
- PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
- let reason = if (error_code & 0x1000) != 0 {
- let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
- HTLCFailReason::reason(real_code, error_data)
- } else {
- HTLCFailReason::from_failure_code(error_code)
- }.get_encrypted_failure_packet(incoming_shared_secret, &None);
- let msg = msgs::UpdateFailHTLC {
- channel_id: msg.channel_id,
- htlc_id: msg.htlc_id,
- reason
- };
- PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
- },
- _ => pending_forward_info
- }
- };
- try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
- } else {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ };
+ try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
Ok(())
}
fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
- let channel_lock = self.channel_state.lock().unwrap();
let (htlc_source, forwarded_htlc_value) = {
let per_peer_state = self.per_peer_state.read().unwrap();
- if let None = per_peer_state.get(counterparty_node_id) {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
- }
- let peer_state_mutex = per_peer_state.get(counterparty_node_id).unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan)
},
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
- self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
+ self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
Ok(())
}
fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
- try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan);
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
- } else {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
- }
- Ok(())
- }
-
- fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
- let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
- if (msg.failure_code & 0x8000) == 0 {
- let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
- try_chan_entry!(self, Err(chan_err), chan);
- }
- try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan);
- Ok(())
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
- } else {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan);
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ }
+ Ok(())
+ }
+
+ fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
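+				// BOLT 4 requires the BADONION bit (0x8000) to be set on every
+				// update_fail_malformed_htlc failure code; a peer omitting it is misbehaving,
+				// so we treat that as a channel-closing protocol violation.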
+ if (msg.failure_code & 0x8000) == 0 {
+ let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
+ try_chan_entry!(self, Err(chan_err), chan);
+ }
+ try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan);
+ Ok(())
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
- let (revoke_and_ack, commitment_signed, monitor_update) =
- match chan.get_mut().commitment_signed(&msg, &self.logger) {
- Err((None, e)) => try_chan_entry!(self, Err(e), chan),
- Err((Some(update), e)) => {
- assert!(chan.get().is_awaiting_monitor_update());
- let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update);
- try_chan_entry!(self, Err(e), chan);
- unreachable!();
- },
- Ok(res) => res
- };
- let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update);
- if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
- return Err(e);
- }
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ let (revoke_and_ack, commitment_signed, monitor_update) =
+ match chan.get_mut().commitment_signed(&msg, &self.logger) {
+ Err((None, e)) => try_chan_entry!(self, Err(e), chan),
+ Err((Some(update), e)) => {
+ assert!(chan.get().is_awaiting_monitor_update());
+ let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &update);
+ try_chan_entry!(self, Err(e), chan);
+ unreachable!();
+ },
+ Ok(res) => res
+ };
+ let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &monitor_update);
+ if let Err(e) = handle_monitor_update_res!(self, update_res, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) {
+ return Err(e);
+ }
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ node_id: counterparty_node_id.clone(),
+ msg: revoke_and_ack,
+ });
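+				// The revoke_and_ack is intentionally queued before any commitment update,
+				// matching the RAACommitmentOrder::RevokeAndACKFirst ordering used in the
+				// monitor-update handling above.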
+ if let Some(msg) = commitment_signed {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: counterparty_node_id.clone(),
- msg: revoke_and_ack,
+ updates: msgs::CommitmentUpdate {
+ update_add_htlcs: Vec::new(),
+ update_fulfill_htlcs: Vec::new(),
+ update_fail_htlcs: Vec::new(),
+ update_fail_malformed_htlcs: Vec::new(),
+ update_fee: None,
+ commitment_signed: msg,
+ },
});
- if let Some(msg) = commitment_signed {
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: counterparty_node_id.clone(),
- updates: msgs::CommitmentUpdate {
- update_add_htlcs: Vec::new(),
- update_fulfill_htlcs: Vec::new(),
- update_fail_htlcs: Vec::new(),
- update_fail_malformed_htlcs: Vec::new(),
- update_fee: None,
- commitment_signed: msg,
- },
- });
- }
- Ok(())
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
- } else {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ }
+ Ok(())
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
}
fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
let mut htlcs_to_fail = Vec::new();
let res = loop {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
- let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
- let raa_updates = break_chan_entry!(self,
- chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
- htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
- let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update);
- if was_paused_for_mon_update {
- assert!(update_res != ChannelMonitorUpdateStatus::Completed);
- assert!(raa_updates.commitment_update.is_none());
- assert!(raa_updates.accepted_htlcs.is_empty());
- assert!(raa_updates.failed_htlcs.is_empty());
- assert!(raa_updates.finalized_claimed_htlcs.is_empty());
- break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
- }
- if update_res != ChannelMonitorUpdateStatus::Completed {
- if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
- RAACommitmentOrder::CommitmentFirst, false,
- raa_updates.commitment_update.is_some(), false,
- raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
- raa_updates.finalized_claimed_htlcs) {
- break Err(e);
- } else { unreachable!(); }
- }
- if let Some(updates) = raa_updates.commitment_update {
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: counterparty_node_id.clone(),
- updates,
- });
- }
- break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
- raa_updates.finalized_claimed_htlcs,
- chan.get().get_short_channel_id()
- .unwrap_or(chan.get().outbound_scid_alias()),
- chan.get().get_funding_txo().unwrap(),
- chan.get().get_user_id()))
- },
- hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
- } else {
- break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update();
+ let raa_updates = break_chan_entry!(self,
+ chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
+ htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
+ let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), &raa_updates.monitor_update);
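+					// If the channel was already paused on an earlier monitor update, this
+					// update cannot have completed and the RAA cannot have produced anything
+					// for us to send or forward; we assert as much below and return an
+					// ignorable error.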
+ if was_paused_for_mon_update {
+ assert!(update_res != ChannelMonitorUpdateStatus::Completed);
+ assert!(raa_updates.commitment_update.is_none());
+ assert!(raa_updates.accepted_htlcs.is_empty());
+ assert!(raa_updates.failed_htlcs.is_empty());
+ assert!(raa_updates.finalized_claimed_htlcs.is_empty());
+ break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned()));
+ }
+ if update_res != ChannelMonitorUpdateStatus::Completed {
+ if let Err(e) = handle_monitor_update_res!(self, update_res, chan,
+ RAACommitmentOrder::CommitmentFirst, false,
+ raa_updates.commitment_update.is_some(), false,
+ raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+ raa_updates.finalized_claimed_htlcs) {
+ break Err(e);
+ } else { unreachable!(); }
+ }
+ if let Some(updates) = raa_updates.commitment_update {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: counterparty_node_id.clone(),
+ updates,
+ });
+ }
+ break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
+ raa_updates.finalized_claimed_htlcs,
+ chan.get().get_short_channel_id()
+ .unwrap_or(chan.get().outbound_scid_alias()),
+ chan.get().get_funding_txo().unwrap(),
+ chan.get().get_user_id()))
+ },
+ hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
- try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
- } else {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
Ok(())
}
fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
- if !chan.get().is_usable() {
- return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
- }
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if !chan.get().is_usable() {
+ return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
+ }
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
- msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
- self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), chan),
- // Note that announcement_signatures fails if the channel cannot be announced,
- // so get_channel_update_for_broadcast will never fail by the time we get here.
- update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
- });
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
- } else {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
+ peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
+ msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
+ &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
+ msg, &self.default_configuration
+ ), chan),
+ // Note that announcement_signatures fails if the channel cannot be announced,
+ // so get_channel_update_for_broadcast will never fail by the time we get here.
+ update_msg: Some(self.get_channel_update_for_broadcast(chan.get()).unwrap()),
+ });
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
Ok(())
}
}
};
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(&chan_counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(chan_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- if chan.get().should_announce() {
- // If the announcement is about a channel of ours which is public, some
- // other peer may simply be forwarding all its gossip to us. Don't provide
- // a scary-looking error message and return Ok instead.
- return Ok(NotifyOption::SkipPersist);
- }
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
- }
- let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
- let msg_from_node_one = msg.contents.flags & 1 == 0;
- if were_node_one == msg_from_node_one {
+ let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
+ if peer_state_mutex_opt.is_none() {
+ return Ok(NotifyOption::SkipPersist)
+ }
+ let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(chan_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+ if chan.get().should_announce() {
+ // If the announcement is about a channel of ours which is public, some
+ // other peer may simply be forwarding all its gossip to us. Don't provide
+ // a scary-looking error message and return Ok instead.
return Ok(NotifyOption::SkipPersist);
- } else {
- log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
- try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
}
- },
- hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
- }
- } else {
- return Ok(NotifyOption::SkipPersist)
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
+ }
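+				// BOLT 7: bit 0 of the update's flags encodes the direction, and is 0 when
+				// the update was generated by the node with the lexicographically lesser
+				// node_id (node_1). An update describing our own side of the channel tells
+				// us nothing new, so we skip it without persisting.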
+ let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
+ let msg_from_node_one = msg.contents.flags & 1 == 0;
+ if were_node_one == msg_from_node_one {
+ return Ok(NotifyOption::SkipPersist);
+ } else {
+ log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
+ try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
+ }
+ },
+ hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
}
Ok(NotifyOption::DoPersist)
}
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
let htlc_forwards;
let need_lnd_workaround = {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- match peer_state.channel_by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
- // Currently, we expect all holding cell update_adds to be dropped on peer
- // disconnect, so Channel's reestablish will never hand us any holding cell
- // freed HTLCs to fail backwards. If in the future we no longer drop pending
- // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
- let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
- msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash,
- &*self.best_block.read().unwrap()), chan);
- let mut channel_update = None;
- if let Some(msg) = responses.shutdown_msg {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: counterparty_node_id.clone(),
+ let peer_state_mutex = per_peer_state.get(counterparty_node_id)
+ .ok_or_else(|| {
+ debug_assert!(false);
+ MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
+ })?;
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ match peer_state.channel_by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ // Currently, we expect all holding cell update_adds to be dropped on peer
+ // disconnect, so Channel's reestablish will never hand us any holding cell
+ // freed HTLCs to fail backwards. If in the future we no longer drop pending
+ // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+ let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
+ msg, &self.logger, &self.node_signer, self.genesis_hash,
+ &self.default_configuration, &*self.best_block.read().unwrap()), chan);
+ let mut channel_update = None;
+ if let Some(msg) = responses.shutdown_msg {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
+ node_id: counterparty_node_id.clone(),
+ msg,
+ });
+ } else if chan.get().is_usable() {
+ // If the channel is in a usable state (ie the channel is not being shut
+ // down), send a unicast channel_update to our counterparty to make sure
+ // they have the latest channel parameters.
+ if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
+ channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
+ node_id: chan.get().get_counterparty_node_id(),
msg,
});
- } else if chan.get().is_usable() {
- // If the channel is in a usable state (ie the channel is not being shut
- // down), send a unicast channel_update to our counterparty to make sure
- // they have the latest channel parameters.
- if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
- channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
- node_id: chan.get().get_counterparty_node_id(),
- msg,
- });
- }
}
- let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
- htlc_forwards = self.handle_channel_resumption(
- &mut channel_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
- Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
- if let Some(upd) = channel_update {
- channel_state.pending_msg_events.push(upd);
- }
- need_lnd_workaround
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
- } else {
- return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer with a node_id matching the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id));
+ }
+ let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
+ htlc_forwards = self.handle_channel_resumption(
+ &mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
+ Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+ if let Some(upd) = channel_update {
+ peer_state.pending_msg_events.push(upd);
+ }
+ need_lnd_workaround
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
MonitorEvent::HTLCEvent(htlc_update) => {
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
- self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
+ self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
},
MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
MonitorEvent::UpdateFailed(funding_outpoint) => {
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_lock;
let counterparty_node_id_opt = match counterparty_node_id {
Some(cp_id) => Some(cp_id),
None => {
if let Some(counterparty_node_id) = counterparty_node_id_opt {
let per_peer_state = self.per_peer_state.read().unwrap();
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
- let pending_msg_events = &mut channel_state.pending_msg_events;
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ let pending_msg_events = &mut peer_state.pending_msg_events;
if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
let mut chan = remove_channel!(self, chan_entry);
failed_channels.push(chan.force_shutdown(false));
let mut failed_htlcs = Vec::new();
let mut handle_errors = Vec::new();
{
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- let pending_msg_events = &mut channel_state.pending_msg_events;
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ let pending_msg_events = &mut peer_state.pending_msg_events;
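+			// Channels park HTLC updates in their holding cell while they're unable to send
+			// them (e.g. while a monitor update is pending); here we try to free any held
+			// HTLCs and queue the resulting commitment updates.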
peer_state.channel_by_id.retain(|channel_id, chan| {
match chan.maybe_free_holding_cell_htlcs(&self.logger) {
Ok((commitment_opt, holding_cell_failed_htlcs)) => {
));
}
if let Some((commitment_update, monitor_update)) = commitment_opt {
- match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+ match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), &monitor_update) {
ChannelMonitorUpdateStatus::Completed => {
pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
node_id: chan.get_counterparty_node_id(),
let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
let mut has_update = false;
{
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- let pending_msg_events = &mut channel_state.pending_msg_events;
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ let pending_msg_events = &mut peer_state.pending_msg_events;
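+			// For channels where shutdown is complete and no HTLCs remain, propose (or
+			// re-propose after a fee change) a closing_signed to move fee negotiation along.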
peer_state.channel_by_id.retain(|channel_id, chan| {
match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
Ok((msg_opt, tx_opt)) => {
return Err(APIError::APIMisuseError { err: format!("min_value_msat of {} greater than total 21 million bitcoin supply", min_value_msat.unwrap()) });
}
- let payment_secret = PaymentSecret(self.keys_manager.get_secure_random_bytes());
+ let payment_secret = PaymentSecret(self.entropy_source.get_secure_random_bytes());
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
/// [`PaymentHash`] and [`PaymentPreimage`] for you.
///
/// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`], which
- /// will have the [`PaymentClaimable::payment_preimage`] field filled in. That should then be
+	/// will have the [`PaymentClaimable::purpose`] set to [`PaymentPurpose::InvoicePayment`] with
+ /// its [`PaymentPurpose::InvoicePayment::payment_preimage`] field filled in. That should then be
/// passed directly to [`claim_funds`].
///
/// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
///
/// Errors if `min_value_msat` is greater than total bitcoin supply.
///
+ /// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable
+ /// on versions of LDK prior to 0.0.114.
+ ///
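+	/// A minimal usage sketch (assuming `channel_manager` is an initialized
+	/// [`ChannelManager`]; not compiled as a doctest here):
+	///
+	/// ```ignore
+	/// // Accept any amount, expire in an hour, use the default min_final_cltv_expiry_delta.
+	/// let (payment_hash, payment_secret) =
+	///     channel_manager.create_inbound_payment(None, 3600, None).unwrap();
+	/// ```
+	///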
/// [`claim_funds`]: Self::claim_funds
/// [`PaymentClaimable`]: events::Event::PaymentClaimable
- /// [`PaymentClaimable::payment_preimage`]: events::Event::PaymentClaimable::payment_preimage
+ /// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose
+ /// [`PaymentPurpose::InvoicePayment`]: events::PaymentPurpose::InvoicePayment
+ /// [`PaymentPurpose::InvoicePayment::payment_preimage`]: events::PaymentPurpose::InvoicePayment::payment_preimage
/// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
- pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), ()> {
- inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs, &self.keys_manager, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
+ pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
+ min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
+ inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs,
+ &self.entropy_source, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
+ min_final_cltv_expiry_delta)
}
/// Legacy version of [`create_inbound_payment`]. Use this method if you wish to share
/// [`create_inbound_payment`]: Self::create_inbound_payment
#[deprecated]
pub fn create_inbound_payment_legacy(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), APIError> {
- let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes());
+ let payment_preimage = PaymentPreimage(self.entropy_source.get_secure_random_bytes());
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
let payment_secret = self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs)?;
Ok((payment_hash, payment_secret))
/// If you need exact expiry semantics, you should enforce them upon receipt of
/// [`PaymentClaimable`].
///
- /// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry`
- /// set to at least [`MIN_FINAL_CLTV_EXPIRY`].
+ /// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry_delta`
+ /// set to at least [`MIN_FINAL_CLTV_EXPIRY_DELTA`].
///
/// Note that a malicious eavesdropper can intuit whether an inbound payment was created by
/// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime.
///
/// Errors if `min_value_msat` is greater than total bitcoin supply.
///
+ /// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable
+ /// on versions of LDK prior to 0.0.114.
+ ///
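+ /// A minimal usage sketch (hypothetical `payment_hash`; `channel_manager` assumed to be in
+ /// scope):
+ /// ```ignore
+ /// // `payment_hash` commits to a preimage held elsewhere, e.g. by the payer.
+ /// let payment_secret = channel_manager
+ ///     .create_inbound_payment_for_hash(payment_hash, None, 3600, None)
+ ///     .expect("min_value_msat must not exceed the total bitcoin supply");
+ /// ```
+ ///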
/// [`create_inbound_payment`]: Self::create_inbound_payment
/// [`PaymentClaimable`]: events::Event::PaymentClaimable
- pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, ()> {
- inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash, invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
+ pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>,
+ invoice_expiry_delta_secs: u32, min_final_cltv_expiry_delta: Option<u16>) -> Result<PaymentSecret, ()> {
+ inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash,
+ invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
+ min_final_cltv_expiry_delta)
}
/// Legacy version of [`create_inbound_payment_for_hash`]. Use this method if you wish to share
let best_block_height = self.best_block.read().unwrap().height();
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
- let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
+ let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
// Ensure the generated scid doesn't conflict with a real channel.
match short_to_chan_info.get(&scid_candidate) {
Some(_) => continue,
let best_block_height = self.best_block.read().unwrap().height();
let short_to_chan_info = self.short_to_chan_info.read().unwrap();
loop {
- let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
+ let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
// Ensure the generated scid doesn't conflict with a real channel.
if short_to_chan_info.contains_key(&scid_candidate) { continue }
return scid_candidate
events.into_inner()
}
+ #[cfg(feature = "_test_utils")]
+ pub fn push_pending_event(&self, event: events::Event) {
+ let mut events = self.pending_events.lock().unwrap();
+ events.push(event);
+ }
+
#[cfg(test)]
pub fn pop_pending_event(&self) -> Option<events::Event> {
let mut events = self.pending_events.lock().unwrap();
}
}
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
{
+ /// Returns `MessageSendEvent`s strictly ordered per-peer, in the order they were generated.
+ /// The returned array will contain `MessageSendEvent`s for different peers if
+ /// `MessageSendEvent`s to more than one peer exist, but `MessageSendEvent`s to the same peer
+ /// are always placed next to each other.
+ ///
+ /// Note that while `MessageSendEvent`s are strictly ordered per-peer, the peer order for
+ /// the chunks of `MessageSendEvent`s for different peers is random. I.e. if the array contains
+ /// `MessageSendEvent`s for both `node_a` and `node_b`, the `MessageSendEvent`s for `node_a`
+ /// will randomly be placed first or last in the returned array.
+ ///
+ /// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
+ /// `MessageSendEvent`s are intended to be broadcast to all peers, they will be placed among
+ /// the `MessageSendEvent`s to the specific peer they were generated under.
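+ ///
+ /// For example (a hypothetical illustration of the ordering described above), events for peers
+ /// `a` and `b` may come back as `[a1, a2, b1]` or `[b1, a1, a2]`, but never interleaved as
+ /// `[a1, b1, a2]`.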
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
let events = RefCell::new(Vec::new());
PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
}
let mut pending_events = Vec::new();
- let mut channel_state = self.channel_state.lock().unwrap();
- mem::swap(&mut pending_events, &mut channel_state.pending_msg_events);
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
+ let mut peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ if peer_state.pending_msg_events.len() > 0 {
+ pending_events.append(&mut peer_state.pending_msg_events);
+ }
+ }
if !pending_events.is_empty() {
events.replace(pending_events);
}
}
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> EventsProvider for ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> EventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
}
}
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> chain::Listen for ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> chain::Listen for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
*best_block = BestBlock::new(header.prev_blockhash, new_height)
}
- self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
+ self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
}
}
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger)
+ self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
.map(|(a, b)| (a, Vec::new(), b)));
let last_best_block_height = self.best_block.read().unwrap().height();
if height < last_best_block_height {
let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
- self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
+ self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
}
}
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
- self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
+ self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
macro_rules! max_time {
($timestamp: expr) => {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for chan in peer_state.channel_by_id.values() {
- if let (Some(funding_txo), block_hash) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
- res.push((funding_txo.txid, block_hash));
+ if let (Some(funding_txo), Some(block_hash)) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
+ res.push((funding_txo.txid, Some(block_hash)));
}
}
}
}
}
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
/// Calls a function which handles an on-chain event (blocks dis/connected, transactions
/// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
/// the function.
- fn do_chain_event<FN: Fn(&mut Channel<<K::Target as SignerProvider>::Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
+ fn do_chain_event<FN: Fn(&mut Channel<<SP::Target as SignerProvider>::Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
(&self, height_opt: Option<u32>, f: FN) {
// Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
// during initialization prior to the chain_monitor being fully configured in some cases.
let mut failed_channels = Vec::new();
let mut timed_out_htlcs = Vec::new();
{
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_lock;
- let pending_msg_events = &mut channel_state.pending_msg_events;
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ let pending_msg_events = &mut peer_state.pending_msg_events;
peer_state.channel_by_id.retain(|_, channel| {
let res = f(channel);
if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
msg: announcement_sigs,
});
if let Some(height) = height_opt {
- if let Some(announcement) = channel.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash, height) {
+ if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
msg: announcement,
// Note that announcement_signatures fails if the channel cannot be announced,
// so get_channel_update_for_broadcast will never fail by the time we get here.
- update_msg: self.get_channel_update_for_broadcast(channel).unwrap(),
+ update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
});
}
}
pub fn current_best_block(&self) -> BestBlock {
self.best_block.read().unwrap().clone()
}
+
+ /// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
+ /// [`ChannelManager`].
+ pub fn node_features(&self) -> NodeFeatures {
+ provided_node_features(&self.default_configuration)
+ }
+
+ /// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
+ /// [`ChannelManager`].
+ ///
+ /// Note that the invoice feature flags can vary depending on whether the invoice is a
+ /// "phantom invoice". Thus, this method is not public.
+ #[cfg(any(feature = "_test_utils", test))]
+ pub fn invoice_features(&self) -> InvoiceFeatures {
+ provided_invoice_features(&self.default_configuration)
+ }
+
+ /// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by
+ /// [`ChannelManager`].
+ pub fn channel_features(&self) -> ChannelFeatures {
+ provided_channel_features(&self.default_configuration)
+ }
+
+ /// Fetches the set of [`ChannelTypeFeatures`] flags which are provided by or required by
+ /// [`ChannelManager`].
+ pub fn channel_type_features(&self) -> ChannelTypeFeatures {
+ provided_channel_type_features(&self.default_configuration)
+ }
+
+ /// Fetches the set of [`InitFeatures`] flags which are provided by or required by
+ /// [`ChannelManager`].
+ pub fn init_features(&self) -> InitFeatures {
+ provided_init_features(&self.default_configuration)
+ }
}
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
- ChannelMessageHandler for ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+ ChannelMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
{
- fn handle_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) {
+ fn handle_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, their_features, msg), *counterparty_node_id);
+ let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id);
}
- fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) {
+ fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, their_features, msg), *counterparty_node_id);
+ let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
}
- fn handle_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) {
+ fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, their_features, msg), *counterparty_node_id);
+ let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut failed_channels = Vec::new();
- let mut no_channels_remain = true;
- let mut channel_state = self.channel_state.lock().unwrap();
let mut per_peer_state = self.per_peer_state.write().unwrap();
- {
- let pending_msg_events = &mut channel_state.pending_msg_events;
+ let remove_peer = {
log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ let pending_msg_events = &mut peer_state.pending_msg_events;
peer_state.channel_by_id.retain(|_, chan| {
- if chan.get_counterparty_node_id() == *counterparty_node_id {
- chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
- if chan.is_shutdown() {
- update_maps_on_chan_removal!(self, chan);
- self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
- return false;
- } else {
- no_channels_remain = false;
- }
+ chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+ if chan.is_shutdown() {
+ update_maps_on_chan_removal!(self, chan);
+ self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
+ return false;
}
true
});
- }
- pending_msg_events.retain(|msg| {
- match msg {
- &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::SendChannelReady { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::SendChannelAnnouncement { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
- &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
- &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
- &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
- &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
- &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
- &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
- }
- });
- mem::drop(channel_state);
- }
- if no_channels_remain {
+ pending_msg_events.retain(|msg| {
+ match msg {
+ &events::MessageSendEvent::SendAcceptChannel { .. } => false,
+ &events::MessageSendEvent::SendOpenChannel { .. } => false,
+ &events::MessageSendEvent::SendFundingCreated { .. } => false,
+ &events::MessageSendEvent::SendFundingSigned { .. } => false,
+ &events::MessageSendEvent::SendChannelReady { .. } => false,
+ &events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
+ &events::MessageSendEvent::UpdateHTLCs { .. } => false,
+ &events::MessageSendEvent::SendRevokeAndACK { .. } => false,
+ &events::MessageSendEvent::SendClosingSigned { .. } => false,
+ &events::MessageSendEvent::SendShutdown { .. } => false,
+ &events::MessageSendEvent::SendChannelReestablish { .. } => false,
+ &events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
+ &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
+ &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
+ &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
+ &events::MessageSendEvent::SendChannelUpdate { .. } => false,
+ &events::MessageSendEvent::HandleError { .. } => false,
+ &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
+ &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
+ &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
+ &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
+ }
+ });
+ debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
+ peer_state.is_connected = false;
+ peer_state.ok_to_remove(true)
+ } else { true }
+ };
+ if remove_peer {
per_peer_state.remove(counterparty_node_id);
}
mem::drop(per_peer_state);
e.insert(Mutex::new(PeerState {
channel_by_id: HashMap::new(),
latest_features: init_msg.features.clone(),
+ pending_msg_events: Vec::new(),
+ is_connected: true,
}));
},
hash_map::Entry::Occupied(e) => {
- e.get().lock().unwrap().latest_features = init_msg.features.clone();
+ let mut peer_state = e.get().lock().unwrap();
+ peer_state.latest_features = init_msg.features.clone();
+ debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
+ peer_state.is_connected = true;
},
}
}
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- let pending_msg_events = &mut channel_state.pending_msg_events;
let per_peer_state = self.per_peer_state.read().unwrap();
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ let pending_msg_events = &mut peer_state.pending_msg_events;
peer_state.channel_by_id.retain(|_, chan| {
let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
if !chan.have_received_message() {
}
} else { true };
if retain && chan.get_counterparty_node_id() != *counterparty_node_id {
- if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) {
+ if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
node_id: *counterparty_node_id,
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
if msg.channel_id == [0; 32] {
- for chan in self.list_channels() {
- if chan.counterparty.node_id == *counterparty_node_id {
- // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
- let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data), true);
- }
+ let channel_ids: Vec<[u8; 32]> = {
+ let per_peer_state = self.per_peer_state.read().unwrap();
+ let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+ if peer_state_mutex_opt.is_none() { return; }
+ let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ peer_state.channel_by_id.keys().cloned().collect()
+ };
+ for channel_id in channel_ids {
+ // Untrusted messages from peer, we throw away the error if id points to a non-existent channel
+ let _ = self.force_close_channel_with_peer(&channel_id, counterparty_node_id, Some(&msg.data), true);
}
} else {
{
// First check if we can advance the channel type and try again.
- let mut channel_state = self.channel_state.lock().unwrap();
let per_peer_state = self.per_peer_state.read().unwrap();
- if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
- let mut peer_state_lock = peer_state_mutex.lock().unwrap();
- let peer_state = &mut *peer_state_lock;
- if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
- if chan.get_counterparty_node_id() != *counterparty_node_id {
- return;
- }
- if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
- node_id: *counterparty_node_id,
- msg,
- });
- return;
- }
+ let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
+ if peer_state_mutex_opt.is_none() { return; }
+ let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
+ let peer_state = &mut *peer_state_lock;
+ if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
+ if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
+ peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
+ node_id: *counterparty_node_id,
+ msg,
+ });
+ return;
}
- } else { return; }
+ }
}
// Untrusted messages from peer, we throw away the error if id points to a non-existent channel
}
fn provided_node_features(&self) -> NodeFeatures {
- provided_node_features()
+ provided_node_features(&self.default_configuration)
}
fn provided_init_features(&self, _their_init_features: &PublicKey) -> InitFeatures {
- provided_init_features()
+ provided_init_features(&self.default_configuration)
}
}
/// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
/// [`ChannelManager`].
-pub fn provided_node_features() -> NodeFeatures {
- provided_init_features().to_context()
+pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
+ provided_init_features(config).to_context()
}
/// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
/// Note that the invoice feature flags can vary depending on whether the invoice is a
/// "phantom invoice". Thus, this method is not public.
#[cfg(any(feature = "_test_utils", test))]
-pub fn provided_invoice_features() -> InvoiceFeatures {
- provided_init_features().to_context()
+pub(crate) fn provided_invoice_features(config: &UserConfig) -> InvoiceFeatures {
+ provided_init_features(config).to_context()
}
/// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by
/// [`ChannelManager`].
-pub fn provided_channel_features() -> ChannelFeatures {
- provided_init_features().to_context()
+pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures {
+ provided_init_features(config).to_context()
+}
+
+/// Fetches the set of [`ChannelTypeFeatures`] flags which are provided by or required by
+/// [`ChannelManager`].
+pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures {
+ ChannelTypeFeatures::from_init(&provided_init_features(config))
}
/// Fetches the set of [`InitFeatures`] flags which are provided by or required by
/// [`ChannelManager`].
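+///
+/// A minimal sketch (assumes the `anchors` cfg flag is set and the config fields shown in this
+/// diff) of how a handshake-config choice surfaces in the advertised features:
+/// ```ignore
+/// let mut config = UserConfig::default();
+/// config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+/// assert!(provided_init_features(&config).supports_anchors_zero_fee_htlc_tx());
+/// ```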
-pub fn provided_init_features() -> InitFeatures {
+pub fn provided_init_features(_config: &UserConfig) -> InitFeatures {
// Note that if new features are added here which other peers may (eventually) require, we
// should also add the corresponding (optional) bit to the ChannelMessageHandler impl for
// ErroringMessageHandler.
features.set_channel_type_optional();
features.set_scid_privacy_optional();
features.set_zero_conf_optional();
+ #[cfg(anchors)]
+ { // Attributes are not allowed on if expressions on our current MSRV of 1.41.
+ if _config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
+ features.set_anchors_zero_fee_htlc_tx_optional();
+ }
+ }
features
}
impl Readable for ChannelDetails {
fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
- init_and_read_tlv_fields!(reader, {
+ _init_and_read_tlv_fields!(reader, {
(1, inbound_scid_alias, option),
(2, channel_id, required),
(3, channel_type, option),
(8, min_value_msat, required),
});
-impl<M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref> Writeable for ChannelManager<M, T, K, F, R, L>
+impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> Writeable for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
best_block.block_hash().write(writer)?;
}
+ let mut serializable_peer_count: u64 = 0;
{
let per_peer_state = self.per_peer_state.read().unwrap();
let mut unfunded_channels = 0;
for (_, peer_state_mutex) in per_peer_state.iter() {
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
+ if !peer_state.ok_to_remove(false) {
+ serializable_peer_count += 1;
+ }
number_of_channels += peer_state.channel_by_id.len();
for (_, channel) in peer_state.channel_by_id.iter() {
if !channel.is_funding_initiated() {
}
}
+ let per_peer_state = self.per_peer_state.write().unwrap();
+
let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
let claimable_payments = self.claimable_payments.lock().unwrap();
let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
htlc_purposes.push(purpose);
}
- let per_peer_state = self.per_peer_state.write().unwrap();
- (per_peer_state.len() as u64).write(writer)?;
+ (serializable_peer_count).write(writer)?;
for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
- peer_pubkey.write(writer)?;
- let peer_state = peer_state_mutex.lock().unwrap();
- peer_state.latest_features.write(writer)?;
+ let peer_state_lock = peer_state_mutex.lock().unwrap();
+ let peer_state = &*peer_state_lock;
+ // Peers to whom we have no channels should be dropped once disconnected. As we
+ // disconnect all peers when shutting down and serializing the ChannelManager, we
+ // consider all peers as disconnected here. There's therefore no need to write peers with
+ // no channels.
+ if !peer_state.ok_to_remove(false) {
+ peer_pubkey.write(writer)?;
+ peer_state.latest_features.write(writer)?;
+ }
}
let events = self.pending_events.lock().unwrap();
/// which you've already broadcasted the transaction.
///
/// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
-pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
+pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
where
- M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
{
+ /// A cryptographically secure source of entropy.
+ pub entropy_source: ES,
+
+ /// A signer that is able to perform node-scoped cryptographic operations.
+ pub node_signer: NS,
+
/// The keys provider which will give us relevant keys. Some keys will be loaded during
/// deserialization and [`SignerProvider::read_chan_signer`] will be used to read per-Channel
/// signing data.
- pub keys_manager: K,
+ pub signer_provider: SP,
/// The fee_estimator for use in the ChannelManager in the future.
///
/// this struct.
///
/// (C-not exported) because we have no HashMap bindings
- pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<K::Target as SignerProvider>::Signer>>,
+ pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::Signer>>,
}
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
- ChannelManagerReadArgs<'a, M, T, K, F, R, L>
+impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+ ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>
where
- M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
/// Simple utility function to create a ChannelManagerReadArgs which creates the monitor
/// HashMap for you. This is primarily useful for C bindings where it is not practical to
/// populate a HashMap directly from C.
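+ ///
+ /// A minimal sketch (hypothetical component variables; `KeysManager` implements all three of
+ /// `EntropySource`, `NodeSigner` and `SignerProvider`, so it can be passed for each):
+ /// ```ignore
+ /// let read_args = ChannelManagerReadArgs::new(
+ ///     &keys_manager, &keys_manager, &keys_manager, &fee_estimator, &chain_monitor,
+ ///     &tx_broadcaster, &router, &logger, UserConfig::default(), channel_monitors);
+ /// let (block_hash, channel_manager) =
+ ///     <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _>)>::read(&mut reader, read_args)?;
+ /// ```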
- pub fn new(keys_manager: K, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, default_config: UserConfig,
- mut channel_monitors: Vec<&'a mut ChannelMonitor<<K::Target as SignerProvider>::Signer>>) -> Self {
+ pub fn new(entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, default_config: UserConfig,
+ mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::Signer>>) -> Self {
Self {
- keys_manager, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
+ entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
}
}
// Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
// SimpleArcChannelManager type:
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
- ReadableArgs<ChannelManagerReadArgs<'a, M, T, K, F, R, L>> for (BlockHash, Arc<ChannelManager<M, T, K, F, R, L>>)
+impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+ ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>> for (BlockHash, Arc<ChannelManager<M, T, ES, NS, SP, F, R, L>>)
where
- M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
{
- fn read<Reader: io::Read>(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, K, F, R, L>) -> Result<Self, DecodeError> {
- let (blockhash, chan_manager) = <(BlockHash, ChannelManager<M, T, K, F, R, L>)>::read(reader, args)?;
+ fn read<Reader: io::Read>(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result<Self, DecodeError> {
+ let (blockhash, chan_manager) = <(BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, L>)>::read(reader, args)?;
Ok((blockhash, Arc::new(chan_manager)))
}
}
-impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, R: Deref, L: Deref>
- ReadableArgs<ChannelManagerReadArgs<'a, M, T, K, F, R, L>> for (BlockHash, ChannelManager<M, T, K, F, R, L>)
+impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
+ ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>> for (BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, L>)
where
- M::Target: chain::Watch<<K::Target as SignerProvider>::Signer>,
+ M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
T::Target: BroadcasterInterface,
- K::Target: KeysInterface,
+ ES::Target: EntropySource,
+ NS::Target: NodeSigner,
+ SP::Target: SignerProvider,
F::Target: FeeEstimator,
R::Target: Router,
L::Target: Logger,
{
- fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, K, F, R, L>) -> Result<Self, DecodeError> {
+ fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result<Self, DecodeError> {
let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
let genesis_hash: BlockHash = Readable::read(reader)?;
let channel_count: u64 = Readable::read(reader)?;
let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
- let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<<K::Target as SignerProvider>::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
+ let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<<SP::Target as SignerProvider>::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
let mut channel_closures = Vec::new();
for _ in 0..channel_count {
- let mut channel: Channel<<K::Target as SignerProvider>::Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
+ let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
+ &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
+ ))?;
let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
funding_txo_set.insert(funding_txo.clone());
if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
}
}
- for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
+ for (funding_txo, monitor) in args.channel_monitors.iter_mut() {
if !funding_txo_set.contains(funding_txo) {
log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
}
let peer_count: u64 = Readable::read(reader)?;
- let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<<K::Target as SignerProvider>::Signer>>)>()));
+ let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>)>()));
for _ in 0..peer_count {
let peer_pubkey = Readable::read(reader)?;
let peer_state = PeerState {
channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
latest_features: Readable::read(reader)?,
+ pending_msg_events: Vec::new(),
+ is_connected: false,
};
per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
}
(11, probing_cookie_secret, option),
});
if fake_scid_rand_bytes.is_none() {
- fake_scid_rand_bytes = Some(args.keys_manager.get_secure_random_bytes());
+ fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
}
if probing_cookie_secret.is_none() {
- probing_cookie_secret = Some(args.keys_manager.get_secure_random_bytes());
+ probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
}
if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
hash_map::Entry::Vacant(entry) => {
let path_fee = path.get_path_fees();
entry.insert(PendingOutboundPayment::Retryable {
+ retry_strategy: None,
+ attempts: PaymentAttempts::new(),
+ payment_params: None,
session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
payment_hash: htlc.payment_hash,
payment_secret,
+ keysend_preimage: None, // only used for retries, and we'll never retry on startup
pending_amt_msat: path_amt,
pending_fee_msat: Some(path_fee),
total_msat: path_amt,
});
}
- let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
+ let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
let mut claimable_htlcs = HashMap::with_capacity(claimable_htlcs_list.len());
payment_preimage: match pending_inbound_payments.get(&payment_hash) {
Some(inbound_payment) => inbound_payment.payment_preimage,
None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) {
- Ok(payment_preimage) => payment_preimage,
+ Ok((payment_preimage, _)) => payment_preimage,
Err(()) => {
log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", log_bytes!(payment_hash.0));
return Err(DecodeError::InvalidValue);
}
let mut secp_ctx = Secp256k1::new();
- secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
+ secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
if !channel_closures.is_empty() {
pending_events_read.append(&mut channel_closures);
}
- let our_network_key = match args.keys_manager.get_node_secret(Recipient::Node) {
+ let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
Ok(key) => key,
Err(()) => return Err(DecodeError::InvalidValue)
};
- let our_network_pubkey = PublicKey::from_secret_key(&secp_ctx, &our_network_key);
if let Some(network_pubkey) = received_network_pubkey {
if network_pubkey != our_network_pubkey {
log_error!(args.logger, "Key that was generated does not match the existing key.");
let mut outbound_scid_alias;
loop {
outbound_scid_alias = fake_scid::Namespace::OutboundAlias
- .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.keys_manager);
+ .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
}
chan.set_outbound_scid_alias(outbound_scid_alias);
let mut receiver_node_id = Some(our_network_pubkey);
let phantom_shared_secret = claimable_htlcs[0].prev_hop.phantom_shared_secret;
if phantom_shared_secret.is_some() {
- let phantom_pubkey = args.keys_manager.get_node_id(Recipient::PhantomNode)
+ let phantom_pubkey = args.node_signer.get_node_id(Recipient::PhantomNode)
.expect("Failed to get node_id for phantom node recipient");
receiver_node_id = Some(phantom_pubkey)
}
best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
- channel_state: Mutex::new(ChannelHolder {
- pending_msg_events: Vec::new(),
- }),
inbound_payment_key: expanded_inbound_key,
pending_inbound_payments: Mutex::new(pending_inbound_payments),
- pending_outbound_payments: OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()) },
+ pending_outbound_payments: OutboundPayments { pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), retry_lock: Mutex::new(()), },
pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
forward_htlcs: Mutex::new(forward_htlcs),
probing_cookie_secret: probing_cookie_secret.unwrap(),
- our_network_key,
our_network_pubkey,
secp_ctx,
total_consistency_lock: RwLock::new(()),
persistence_notifier: Notifier::new(),
- keys_manager: args.keys_manager,
+ entropy_source: args.entropy_source,
+ node_signer: args.node_signer,
+ signer_provider: args.signer_provider,
+
logger: args.logger,
default_configuration: args.default_config,
};
mod tests {
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
+ use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
use core::time::Duration;
use core::sync::atomic::Ordering;
use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
- use crate::ln::channelmanager::{self, inbound_payment, PaymentId, PaymentSendFailure};
+ use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, InterceptId};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs;
use crate::ln::msgs::ChannelMessageHandler;
use crate::util::errors::APIError;
use crate::util::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
use crate::util::test_utils;
- use crate::chain::keysinterface::{EntropySource, KeysInterface};
+ use crate::util::config::ChannelConfig;
+ use crate::chain::keysinterface::EntropySource;
#[test]
fn test_notify_limits() {
assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
- let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+ let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
// We check that the channel info nodes have doesn't change too early, even though we try
// to connect messages with new values
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
+ create_announced_chan_between_nodes(&nodes, 0, 1);
// First, send a partial MPP payment.
let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
- create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features());
- let scorer = test_utils::TestScorer::with_penalty(0);
+ create_announced_chan_between_nodes(&nodes, 0, 1);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
// To start (1), send a regular payment but don't claim it.
// Next, attempt a keysend payment and make sure it fails.
let route_params = RouteParameters {
- payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id()),
+ payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV),
final_value_msat: 100_000,
final_cltv_expiry_delta: TEST_FINAL_CLTV,
};
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
- nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features());
+ let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
let route_params = RouteParameters {
- payment_params: PaymentParameters::for_keysend(payee_pubkey),
+ payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
final_value_msat: 10_000,
final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph.clone();
let first_hops = nodes[0].node.list_usable_channels();
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
let payer_pubkey = nodes[0].node.get_our_node_id();
let payee_pubkey = nodes[1].node.get_our_node_id();
- nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features());
+ let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
let route_params = RouteParameters {
- payment_params: PaymentParameters::for_keysend(payee_pubkey),
+ payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
final_value_msat: 10_000,
final_cltv_expiry_delta: 40,
};
let network_graph = nodes[0].network_graph.clone();
let first_hops = nodes[0].node.list_usable_channels();
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let scorer = test_utils::TestScorer::new();
let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
let route = find_route(
&payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
- let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
- let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
- let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
- let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id;
+ let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
+ let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
+ let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
+ let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
// Marshall an MPP route.
let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
}
}
+ #[test]
+ fn test_drop_disconnected_peers_when_removing_channels() {
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
+
+ nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false);
+ nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
+
+ nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
+ check_closed_broadcast!(nodes[0], true);
+ check_added_monitors!(nodes[0], 1);
+ check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+
+ {
+ // Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
+ // disconnected and the channel between has been force closed.
+ let nodes_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
+ // Assert that nodes[1] isn't removed before `timer_tick_occurred` has been executed.
+ assert_eq!(nodes_0_per_peer_state.len(), 1);
+ assert!(nodes_0_per_peer_state.get(&nodes[1].node.get_our_node_id()).is_some());
+ }
+
+ nodes[0].node.timer_tick_occurred();
+
+ {
+ // Assert that nodes[1] has now been removed.
+ assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
+ }
+ }
+
#[test]
fn bad_inbound_payment_hash() {
// Add coverage for checking that a user-provided payment hash matches the payment secret.
nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
- nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), channelmanager::provided_init_features(), &open_channel);
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), channelmanager::provided_init_features(), &accept_channel);
+ nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
let channel_id = &tx.txid().into_inner();
update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap();
- nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
+ nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
- nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &channelmanager::provided_init_features(), &nodes_1_shutdown);
+ nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &nodes_1_shutdown);
let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0);
check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
}
+
+ fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
+ let expected_message = format!("Not connected to node: {}", expected_public_key);
+ check_api_error_message(expected_message, res_err)
+ }
+
+ fn check_unknown_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
+ let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key);
+ check_api_error_message(expected_message, res_err)
+ }
+
+ fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
+ match res_err {
+ Err(APIError::APIMisuseError { err }) => {
+ assert_eq!(err, expected_err_message);
+ },
+ Err(APIError::ChannelUnavailable { err }) => {
+ assert_eq!(err, expected_err_message);
+ },
+ Ok(_) => panic!("Unexpected Ok"),
+ Err(_) => panic!("Unexpected Error"),
+ }
+ }
+
+ #[test]
+ fn test_api_calls_with_unknown_counterparty_node() {
+ // Tests that our API functions which expect a `counterparty_node_id` as input behave as
+ // expected if the `counterparty_node_id` is an unknown peer in the
+ // `ChannelManager::per_peer_state` map.
+ let chanmon_cfg = create_chanmon_cfgs(2);
+ let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+ let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
+ let nodes = create_network(2, &node_cfg, &node_chanmgr);
+
+ // Dummy values
+ let channel_id = [4; 32];
+ let unknown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
+ let intercept_id = InterceptId([0; 32]);
+
+ // Test the API functions.
+ check_not_connected_to_peer_error(nodes[0].node.create_channel(unknown_public_key, 1_000_000, 500_000_000, 42, None), unknown_public_key);
+
+ check_unknown_peer_error(nodes[0].node.accept_inbound_channel(&channel_id, &unknown_public_key, 42), unknown_public_key);
+
+ check_unknown_peer_error(nodes[0].node.close_channel(&channel_id, &unknown_public_key), unknown_public_key);
+
+ check_unknown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unknown_public_key), unknown_public_key);
+
+ check_unknown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unknown_public_key), unknown_public_key);
+
+ check_unknown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unknown_public_key, 1_000_000), unknown_public_key);
+
+ check_unknown_peer_error(nodes[0].node.update_channel_config(&unknown_public_key, &[channel_id], &ChannelConfig::default()), unknown_public_key);
+ }
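+
+	// A hedged companion sketch (hypothetical test, not part of the original change): the
+	// same `create_channel` call succeeds once the counterparty is a known, connected peer,
+	// rather than returning an `APIError`.
+	#[test]
+	fn test_api_calls_with_known_counterparty_node() {
+		let chanmon_cfg = create_chanmon_cfgs(2);
+		let node_cfg = create_node_cfgs(2, &chanmon_cfg);
+		let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
+		let nodes = create_network(2, &node_cfg, &node_chanmgr);
+
+		assert!(nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).is_ok());
+		// Consume the resulting `open_channel` message so no message events are left pending.
+		get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+	}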
+
+ #[cfg(anchors)]
+ #[test]
+ fn test_anchors_zero_fee_htlc_tx_fallback() {
+		// Tests that if both nodes support anchors, but the remote node does not want to accept
+		// anchor channels at the moment, an error is sent to the local node so that it can retry
+		// opening the channel without the anchors feature.
+ let chanmon_cfgs = create_chanmon_cfgs(2);
+ let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+ let mut anchors_config = test_default_channel_config();
+ anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+ anchors_config.manually_accept_inbound_channels = true;
+ let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
+ let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+ nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None).unwrap();
+ let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ assert!(open_channel_msg.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
+
+ nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
+ let events = nodes[1].node.get_and_clear_pending_events();
+ match events[0] {
+ Event::OpenChannelRequest { temporary_channel_id, .. } => {
+ nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
+ }
+ _ => panic!("Unexpected event"),
+ }
+
+ let error_msg = get_err_msg!(nodes[1], nodes[0].node.get_our_node_id());
+ nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
+
+ let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
+ assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
+
+ check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+ }
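+
+	// Illustrative sketch (hypothetical helper, not part of the original change): the
+	// fallback above is observable purely through the `channel_type` bits on the retried
+	// `open_channel` message, which the assertions in the test rely on.
+	#[cfg(anchors)]
+	#[allow(unused)]
+	fn open_channel_proposes_anchors(msg: &crate::ln::msgs::OpenChannel) -> bool {
+		msg.channel_type.as_ref().map_or(false, |t| t.supports_anchors_zero_fee_htlc_tx())
+	}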
}
#[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
pub mod bench {
use crate::chain::Listen;
use crate::chain::chainmonitor::{ChainMonitor, Persist};
- use crate::chain::keysinterface::{EntropySource, KeysManager, KeysInterface, InMemorySigner};
+ use crate::chain::keysinterface::{EntropySource, KeysManager, InMemorySigner};
use crate::ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId};
use crate::ln::functional_test_utils::*;
use crate::ln::msgs::{ChannelMessageHandler, Init};
&'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
&'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
&'a test_utils::TestLogger, &'a P>,
- &'a test_utils::TestBroadcaster, &'a KeysManager,
+ &'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
&'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
&'a test_utils::TestLogger>,
}
let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
- let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(genesis_hash, &logger_a)));
+ let scorer = Mutex::new(test_utils::TestScorer::new());
+ let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(genesis_hash, &logger_a)), &scorer);
let mut config: UserConfig = Default::default();
config.channel_handshake_config.minimum_depth = 1;
let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
let seed_a = [1u8; 32];
let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
- let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, config.clone(), ChainParameters {
+ let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
network,
best_block: BestBlock::from_genesis(network),
});
let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_b);
let seed_b = [2u8; 32];
let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
- let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, config.clone(), ChainParameters {
+ let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
network,
best_block: BestBlock::from_genesis(network),
});
let node_b_holder = NodeHolder { node: &node_b };
- node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
- node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap();
+ node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }).unwrap();
+ node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }).unwrap();
node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
- node_b.handle_open_channel(&node_a.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
- node_a.handle_accept_channel(&node_b.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
+ node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
+ node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
let tx;
if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
macro_rules! send_payment {
($node_a: expr, $node_b: expr) => {
let usable_channels = $node_a.list_usable_channels();
- let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id())
- .with_features(channelmanager::provided_invoice_features());
- let scorer = test_utils::TestScorer::with_penalty(0);
+ let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
+ .with_features($node_b.invoice_features());
+ let scorer = test_utils::TestScorer::new();
let seed = [3u8; 32];
let keys_manager = KeysManager::new(&seed, 42, 42);
let random_seed_bytes = keys_manager.get_secure_random_bytes();
payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
payment_count += 1;
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
- let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200).unwrap();
+ let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
$node_a.send_payment(&route, payment_hash, &Some(payment_secret), PaymentId(payment_hash.0)).unwrap();
let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());