X-Git-Url: http://git.bitcoin.ninja/index.cgi?a=blobdiff_plain;f=lightning%2Fsrc%2Fln%2Fchannelmanager.rs;h=f234ad9f91f555075804d87db115d98193f09619;hb=7544030bb63fee6484fc178bb2ac8f382fe3b5b1;hp=d011d6b42c8621314dfd44fe49709bac702b6620;hpb=4905df889498ec520932ea6684b0ffabbd226641;p=rust-lightning

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index d011d6b4..f234ad9f 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -32,10 +32,10 @@ use bitcoin::hash_types::{BlockHash, Txid};
 use bitcoin::secp256k1::{SecretKey,PublicKey};
 use bitcoin::secp256k1::Secp256k1;
 use bitcoin::secp256k1::ecdh::SharedSecret;
-use bitcoin::secp256k1;
+use bitcoin::{LockTime, secp256k1, Sequence};
 use chain;
-use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
+use chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
 use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
 use chain::transaction::{OutPoint, TransactionData};
@@ -43,10 +43,11 @@ use chain::transaction::{OutPoint, TransactionData};
 // construct one themselves.
 use ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
 use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
-use ln::features::{ChannelTypeFeatures, InitFeatures, NodeFeatures};
+use ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
+#[cfg(any(feature = "_test_utils", test))]
+use ln::features::InvoiceFeatures;
 use routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParameters};
 use ln::msgs;
-use ln::msgs::NetAddress;
 use ln::onion_utils;
 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
 use ln::wire::Encode;
@@ -54,6 +55,7 @@ use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Rec
 use util::config::{UserConfig, ChannelConfig};
 use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
 use util::{byte_utils, events};
+use util::wakers::{Future, Notifier};
 use util::scid_utils::fake_scid;
 use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
 use util::logger::{Level, Logger};
@@ -64,15 +66,11 @@ use prelude::*;
 use core::{cmp, mem};
 use core::cell::RefCell;
 use io::Read;
-use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
+use sync::{Arc, Mutex, MutexGuard, RwLock, RwLockReadGuard};
 use core::sync::atomic::{AtomicUsize, Ordering};
 use core::time::Duration;
 use core::ops::Deref;
-#[cfg(any(test, feature = "std"))]
-use std::time::Instant;
-use util::crypto::sign;
-
 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
 //
 // Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
@@ -403,16 +401,6 @@ pub(super) struct ChannelHolder<Signer: Sign> {
 	/// SCIDs being added once the funding transaction is confirmed at the channel's required
 	/// confirmation depth.
 	pub(super) short_to_chan_info: HashMap<u64, (PublicKey, [u8; 32])>,
-	/// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
-	///
-	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
-	/// though in practice we probably won't be receiving HTLCs for a channel both via the alias
-	/// and via the classic SCID.
-	///
-	/// Note that while this is held in the same mutex as the channels themselves, no consistency
-	/// guarantees are made about the existence of a channel with the short id here, nor the short
-	/// ids in the PendingHTLCInfo!
-	pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
 	/// Map from payment hash to the payment data and any HTLCs which are to us and can be
 	/// failed/claimed by the user.
 	///
@@ -679,6 +667,38 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 /// essentially you should default to using a SimpleRefChannelManager, and use a
 /// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
 /// you're using lightning-net-tokio.
+//
+// Lock order:
+// The tree structure below illustrates the lock order requirements for the different locks of the
+// `ChannelManager`. Locks can be held at the same time if they are on the same branch in the tree,
+// and should then be taken in the order of the lowest to the highest level in the tree.
+// Note that locks on different branches shall not be taken at the same time, as doing so will
+// create a new lock order for those specific locks in the order they were taken.
+//
+// Lock order tree:
+//
+// `total_consistency_lock`
+//  |
+//  |__`forward_htlcs`
+//  |
+//  |__`channel_state`
+//  |   |
+//  |   |__`id_to_peer`
+//  |   |
+//  |   |__`per_peer_state`
+//  |   |
+//  |   |__`outbound_scid_aliases`
+//  |   |
+//  |   |__`pending_inbound_payments`
+//  |   |
+//  |   |__`pending_outbound_payments`
+//  |   |
+//  |   |__`best_block`
+//  |   |
+//  |   |__`pending_events`
+//  |   |
+//  |   |__`pending_background_events`
+//
 pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
 	where M::Target: chain::Watch<Signer>,
 	      T::Target: BroadcasterInterface,
@@ -692,12 +712,14 @@ pub struct ChannelManager
 	#[cfg(test)]
 	pub(super) best_block: RwLock<BestBlock>,
 	#[cfg(not(test))]
 	best_block: RwLock<BestBlock>,
 	secp_ctx: Secp256k1<secp256k1::All>,
+	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	#[cfg(any(test, feature = "_test_utils"))]
 	pub(super) channel_state: Mutex<ChannelHolder<Signer>>,
 	#[cfg(not(any(test, feature = "_test_utils")))]
@@ -707,7 +729,8 @@ pub struct ChannelManager>,
 	/// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
@@ -721,13 +744,30 @@ pub struct ChannelManager>,
+	/// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
+	///
+	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
+	/// though in practice we probably won't be receiving HTLCs for a channel both via the alias
+	/// and via the classic SCID.
+	///
+	/// Note that no consistency guarantees are made about the existence of a channel with the
+	/// `short_channel_id` here, nor the `short_channel_id` in the `PendingHTLCInfo`!
+	///
+	/// See `ChannelManager` struct-level documentation for lock order requirements.
+	#[cfg(test)]
+	pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
+	#[cfg(not(test))]
+	forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
+
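The lock-order tree added above is the central invariant of this refactor: `total_consistency_lock` is the root, `forward_htlcs` now sits on its own branch, and locks on different branches must never be held at the same time. A minimal standalone sketch of that discipline, with stand-in field types rather than LDK's real ones:

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct Manager {
    total_consistency_lock: RwLock<()>,
    forward_htlcs: Mutex<HashMap<u64, Vec<u64>>>,
    channel_state: Mutex<HashMap<[u8; 32], u64>>,
    pending_events: Mutex<Vec<String>>,
}

impl Manager {
    fn do_work(&self) {
        // `total_consistency_lock` is the root of the tree, so it is taken first.
        let _persistence_guard = self.total_consistency_lock.read().unwrap();
        {
            // `forward_htlcs` and `channel_state` sit on different branches, so
            // each is taken and released in its own scope, never nested.
            let mut forwards = self.forward_htlcs.lock().unwrap();
            forwards.entry(42).or_insert_with(Vec::new).push(1);
        }
        {
            let channels = self.channel_state.lock().unwrap();
            // `pending_events` is below `channel_state` on the same branch, so it
            // may be taken while `channel_state` is held, in this order only.
            self.pending_events.lock().unwrap().push(format!("{} channels", channels.len()));
        }
    }
}

fn main() {
    let manager = Manager {
        total_consistency_lock: RwLock::new(()),
        forward_htlcs: Mutex::new(HashMap::new()),
        channel_state: Mutex::new(HashMap::new()),
        pending_events: Mutex::new(Vec::new()),
    };
    manager.do_work();
}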
 	/// The set of outbound SCID aliases across all our channels, including unconfirmed channels
 	/// and some closed channels which reached a usable state prior to being closed. This is used
 	/// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
 	/// active channel list on load.
+	///
+	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	outbound_scid_aliases: Mutex<HashSet<u64>>,
 	/// `channel_id` -> `counterparty_node_id`.
@@ -747,6 +787,8 @@ pub struct ChannelManager>,
 	our_network_key: SecretKey,
@@ -766,10 +808,6 @@ pub struct ChannelManager>>,
+	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	pending_events: Mutex<Vec<events::Event>>,
+	/// See `ChannelManager` struct-level documentation for lock order requirements.
 	pending_background_events: Mutex<Vec<BackgroundEvent>>,
 	/// Used when we have to take a BIG lock to make sure everything is self-consistent.
 	/// Essentially just when we're serializing ourselves out.
 	/// Taken first everywhere where we are making changes before any other locks.
 	/// When acquiring this lock in read mode, rather than acquiring it directly, call
 	/// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
-	/// PersistenceNotifier the lock contains sends out a notification when the lock is released.
+	/// Notifier the lock contains sends out a notification when the lock is released.
 	total_consistency_lock: RwLock<()>,
-	persistence_notifier: PersistenceNotifier,
+	persistence_notifier: Notifier,
 	keys_manager: K,
@@ -835,18 +875,18 @@ enum NotifyOption {
 /// notify or not based on whether relevant changes have been made, providing a closure to
 /// `optionally_notify` which returns a `NotifyOption`.
 struct PersistenceNotifierGuard<'a, F: Fn() -> NotifyOption> {
-	persistence_notifier: &'a PersistenceNotifier,
+	persistence_notifier: &'a Notifier,
 	should_persist: F,
 	// We hold onto this result so the lock doesn't get released immediately.
 	_read_guard: RwLockReadGuard<'a, ()>,
 }
 impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
-	fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
+	fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a Notifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
 		PersistenceNotifierGuard::optionally_notify(lock, notifier, || -> NotifyOption { NotifyOption::DoPersist })
 	}
-	fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
+	fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a Notifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
 		let read_guard = lock.read().unwrap();
 		PersistenceNotifierGuard {
@@ -1172,13 +1212,13 @@ pub enum PaymentSendFailure {
 	/// in over-/re-payment.
 	///
 	/// The results here are ordered the same as the paths in the route object which was passed to
-	/// send_payment, and any Errs which are not APIError::MonitorUpdateFailed can be safely
-	/// retried (though there is currently no API with which to do so).
+	/// send_payment, and any `Err`s which are not [`APIError::MonitorUpdateInProgress`] can be
+	/// safely retried via [`ChannelManager::retry_payment`].
 	///
-	/// Any entries which contain Err(APIError::MonitorUpdateFailed) or Ok(()) MUST NOT be retried
-	/// as they will result in over-/re-payment. These HTLCs all either successfully sent (in the
-	/// case of Ok(())) or will send once channel_monitor_updated is called on the next-hop channel
-	/// with the latest update_id.
+	/// Any entries which contain `Err(APIError::MonitorUpdateInProgress)` or `Ok(())` MUST NOT be
+	/// retried as they will result in over-/re-payment.
These HTLCs all either successfully sent + /// (in the case of `Ok(())`) or will send once a [`MonitorEvent::Completed`] is provided for + /// the next-hop channel with the latest update_id. PartialFailure { /// The errors themselves, in the same order as the route hops. results: Vec>, @@ -1335,11 +1375,11 @@ macro_rules! remove_channel { } } -macro_rules! handle_monitor_err { +macro_rules! handle_monitor_update_res { ($self: ident, $err: expr, $short_to_chan_info: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => { match $err { - ChannelMonitorUpdateErr::PermanentFailure => { - log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..])); + ChannelMonitorUpdateStatus::PermanentFailure => { + log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..])); update_maps_on_chan_removal!($self, $short_to_chan_info, $chan); // TODO: $failed_fails is dropped here, which will cause other channels to hit the // chain in a confused state! We need to move them into the ChannelMonitor which @@ -1351,11 +1391,11 @@ macro_rules! handle_monitor_err { // given up the preimage yet, so might as well just wait until the payment is // retried, avoiding the on-chain fees. let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(), - $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() )); + $chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() )); (res, true) }, - ChannelMonitorUpdateErr::TemporaryFailure => { - log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations", + ChannelMonitorUpdateStatus::InProgress => { + log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations", log_bytes!($chan_id[..]), if $resend_commitment && $resend_raa { match $action_type { @@ -1374,13 +1414,16 @@ macro_rules! 
handle_monitor_err { if !$resend_raa { debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment); } - $chan.monitor_update_failed($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills); + $chan.monitor_updating_paused($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills); (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false) }, + ChannelMonitorUpdateStatus::Completed => { + (Ok(()), false) + }, } }; ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { { - let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key()); + let (res, drop) = handle_monitor_update_res!($self, $err, $channel_state.short_to_chan_info, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key()); if drop { $entry.remove_entry(); } @@ -1388,43 +1431,22 @@ macro_rules! handle_monitor_err { } }; ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { { debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst); - handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id) + handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id) } }; ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => { - handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id) + handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id) }; ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => { - handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new()) + handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new()) }; ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => { - handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new()) + handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new()) }; ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { - handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, 
$resend_commitment, false, $failed_forwards, $failed_fails, Vec::new()) + handle_monitor_update_res!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new()) }; } -macro_rules! return_monitor_err { - ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => { - return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment); - }; - ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { - return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails); - } -} - -// Does not break in case of TemporaryFailure! -macro_rules! maybe_break_monitor_err { - ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => { - match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) { - (e, ChannelMonitorUpdateErr::PermanentFailure) => { - break e; - }, - (_, ChannelMonitorUpdateErr::TemporaryFailure) => { }, - } - } -} - macro_rules! send_channel_ready { ($short_to_chan_info: expr, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => { $pending_msg_events.push(events::MessageSendEvent::SendChannelReady { @@ -1499,15 +1521,18 @@ macro_rules! handle_chan_restoration_locked { // only case where we can get a new ChannelMonitorUpdate would be if we also // have some commitment updates to send as well. assert!($commitment_update.is_some()); - if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) { - // channel_reestablish doesn't guarantee the order it returns is sensical - // for the messages it returns, but if we're setting what messages to - // re-transmit on monitor update success, we need to make sure it is sane. - let mut order = $order; - if $raa.is_none() { - order = RAACommitmentOrder::CommitmentFirst; + match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) { + ChannelMonitorUpdateStatus::Completed => {}, + e => { + // channel_reestablish doesn't guarantee the order it returns is sensical + // for the messages it returns, but if we're setting what messages to + // re-transmit on monitor update success, we need to make sure it is sane. 
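The macro rename from `handle_monitor_err` to `handle_monitor_update_res` tracks the API change this diff is built around: `update_channel` no longer returns `Result<(), ChannelMonitorUpdateErr>` but the three-state `ChannelMonitorUpdateStatus`, which adds an explicit `Completed` arm. A sketch of the calling pattern with a stand-in enum (everything except the variant names is hypothetical):

#[allow(dead_code)]
enum UpdateStatus {
    Completed,
    InProgress,
    PermanentFailure,
}

// Stand-in for `chain_monitor.update_channel(..)`.
fn update_channel() -> UpdateStatus { UpdateStatus::InProgress }

fn apply_update() -> Result<(), String> {
    match update_channel() {
        // The update was persisted synchronously; nothing further to do.
        UpdateStatus::Completed => Ok(()),
        // The update is persisting asynchronously; pause the channel and resume
        // once completion is signaled (in LDK, via `MonitorEvent::Completed`).
        UpdateStatus::InProgress => Err("channel paused until the monitor update completes".to_owned()),
        // Unrecoverable; the channel must be closed.
        UpdateStatus::PermanentFailure => Err("closing channel".to_owned()),
    }
}

fn main() {
    if let Err(msg) = apply_update() {
        eprintln!("{}", msg);
    }
}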
+ let mut order = $order; + if $raa.is_none() { + order = RAACommitmentOrder::CommitmentFirst; + } + break handle_monitor_update_res!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true); } - break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true); } } @@ -1601,13 +1626,13 @@ impl ChannelMana channel_state: Mutex::new(ChannelHolder{ by_id: HashMap::new(), short_to_chan_info: HashMap::new(), - forward_htlcs: HashMap::new(), claimable_htlcs: HashMap::new(), pending_msg_events: Vec::new(), }), outbound_scid_aliases: Mutex::new(HashSet::new()), pending_inbound_payments: Mutex::new(HashMap::new()), pending_outbound_payments: Mutex::new(HashMap::new()), + forward_htlcs: Mutex::new(HashMap::new()), id_to_peer: Mutex::new(HashMap::new()), our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(), @@ -1619,7 +1644,6 @@ impl ChannelMana probing_cookie_secret: keys_manager.get_secure_random_bytes(), - last_node_announcement_serial: AtomicUsize::new(0), highest_seen_timestamp: AtomicUsize::new(0), per_peer_state: RwLock::new(HashMap::new()), @@ -1627,7 +1651,7 @@ impl ChannelMana pending_events: Mutex::new(Vec::new()), pending_background_events: Mutex::new(Vec::new()), total_consistency_lock: RwLock::new(()), - persistence_notifier: PersistenceNotifier::new(), + persistence_notifier: Notifier::new(), keys_manager, @@ -1635,7 +1659,7 @@ impl ChannelMana } } - /// Gets the current configuration applied to all new channels, as + /// Gets the current configuration applied to all new channels. pub fn get_current_default_configuration(&self) -> &UserConfig { &self.default_configuration } @@ -1859,13 +1883,12 @@ impl ChannelMana // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update { - if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { - let (result, is_permanent) = - handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); - if is_permanent { - remove_channel!(self, channel_state, chan_entry); - break result; - } + let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update); + let (result, is_permanent) = + handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); + if is_permanent { + remove_channel!(self, channel_state, chan_entry); + break result; } } @@ -1891,7 +1914,7 @@ impl ChannelMana for htlc_source in failed_htlcs.drain(..) { let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id }; - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); + self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } let _ = handle_error!(self, result, *counterparty_node_id); @@ -1949,7 +1972,7 @@ impl ChannelMana for htlc_source in failed_htlcs.drain(..) 
{ let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: channel_id }; - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); + self.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } if let Some((funding_txo, monitor_update)) = monitor_update_option { // There isn't anything we can do if we get an update failure - we're already @@ -2144,17 +2167,17 @@ impl ChannelMana }) } - fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard>) { + fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> PendingHTLCStatus { macro_rules! return_malformed_err { ($msg: expr, $err_code: expr) => { { log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); - return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { + return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(), failure_code: $err_code, - })), self.channel_state.lock().unwrap()); + })); } } } @@ -2174,25 +2197,20 @@ impl ChannelMana //node knows the HMAC matched, so they already know what is there... return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4); } - - let mut channel_state = None; macro_rules! return_err { ($msg: expr, $err_code: expr, $data: expr) => { { log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg); - if channel_state.is_none() { - channel_state = Some(self.channel_state.lock().unwrap()); - } - return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { + return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id: msg.channel_id, htlc_id: msg.htlc_id, reason: onion_utils::build_first_hop_failure_packet(&shared_secret, $err_code, $data), - })), channel_state.unwrap()); + })); } } } - let next_hop = match onion_utils::decode_next_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) { + let next_hop = match onion_utils::decode_next_payment_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) { Ok(res) => res, Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => { return_malformed_err!(err_msg, err_code); @@ -2246,14 +2264,14 @@ impl ChannelMana } }; - channel_state = Some(self.channel_state.lock().unwrap()); if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info { // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel // with a short_channel_id of 0. This is important as various things later assume // short_channel_id is non-0 in any ::Forward. if let &PendingHTLCRouting::Forward { ref short_channel_id, .. 
} = routing { - let id_option = channel_state.as_ref().unwrap().short_to_chan_info.get(&short_channel_id).cloned(); if let Some((err, code, chan_update)) = loop { + let mut channel_state = self.channel_state.lock().unwrap(); + let id_option = channel_state.short_to_chan_info.get(&short_channel_id).cloned(); let forwarding_id_opt = match id_option { None => { // unknown_next_peer // Note that this is likely a timing oracle for detecting whether an scid is a @@ -2267,7 +2285,7 @@ impl ChannelMana Some((_cp_id, chan_id)) => Some(chan_id.clone()), }; let chan_update_opt = if let Some(forwarding_id) = forwarding_id_opt { - let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap(); + let chan = channel_state.by_id.get_mut(&forwarding_id).unwrap(); if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels { // Note that the behavior here should be identical to the above block - we // should NOT reveal the existence or non-existence of a private channel if @@ -2353,7 +2371,7 @@ impl ChannelMana } } - (pending_forward_info, channel_state.unwrap()) + pending_forward_info } /// Gets the current channel_update for the given channel. This first checks if the channel is @@ -2486,19 +2504,30 @@ impl ChannelMana channel_state, chan) } { Some((update_add, commitment_signed, monitor_update)) => { - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true); - // Note that MonitorUpdateFailed here indicates (per function docs) - // that we will resend the commitment update once monitor updating - // is restored. Therefore, we must return an error indicating that - // it is unsafe to retry the payment wholesale, which we do in the - // send_payment check for MonitorUpdateFailed, below. - insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above. - return Err(APIError::MonitorUpdateFailed); + let update_err = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update); + let chan_id = chan.get().channel_id(); + match (update_err, + handle_monitor_update_res!(self, update_err, channel_state, chan, + RAACommitmentOrder::CommitmentFirst, false, true)) + { + (ChannelMonitorUpdateStatus::PermanentFailure, Err(e)) => break Err(e), + (ChannelMonitorUpdateStatus::Completed, Ok(())) => { + insert_outbound_payment!(); + }, + (ChannelMonitorUpdateStatus::InProgress, Err(_)) => { + // Note that MonitorUpdateInProgress here indicates (per function + // docs) that we will resend the commitment update once monitor + // updating completes. Therefore, we must return an error + // indicating that it is unsafe to retry the payment wholesale, + // which we do in the send_payment check for + // MonitorUpdateInProgress, below. + insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above. 
+ return Err(APIError::MonitorUpdateInProgress); + }, + _ => unreachable!(), } - insert_outbound_payment!(); - log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id())); + log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan_id)); channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { node_id: path.first().unwrap().pubkey, updates: msgs::CommitmentUpdate { @@ -2544,12 +2573,12 @@ impl ChannelMana /// PaymentSendFailure for more info. /// /// In general, a path may raise: - /// * APIError::RouteError when an invalid route or forwarding parameter (cltv_delta, fee, + /// * [`APIError::RouteError`] when an invalid route or forwarding parameter (cltv_delta, fee, /// node public key) is specified. - /// * APIError::ChannelUnavailable if the next-hop channel is not available for updates + /// * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates /// (including due to previous monitor update failure or new permanent monitor update /// failure). - /// * APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the + /// * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the /// relevant updates. /// /// Note that depending on the type of the PaymentSendFailure the HTLC may have been @@ -2613,8 +2642,8 @@ impl ChannelMana for (res, path) in results.iter().zip(route.paths.iter()) { if res.is_ok() { has_ok = true; } if res.is_err() { has_err = true; } - if let &Err(APIError::MonitorUpdateFailed) = res { - // MonitorUpdateFailed is inherently unsafe to retry, so we call it a + if let &Err(APIError::MonitorUpdateInProgress) = res { + // MonitorUpdateInProgress is inherently unsafe to retry, so we call it a // PartialFailure. has_err = true; has_ok = true; @@ -2901,8 +2930,7 @@ impl ChannelMana // Transactions are evaluated as final by network mempools at the next block. However, the modules // constituting our Lightning node might not have perfect sync about their blockchain views. Thus, if // the wallet module is in advance on the LDK view, allow one more block of headroom. - // TODO: updated if/when https://github.com/rust-bitcoin/rust-bitcoin/pull/994 landed and rust-bitcoin bumped. - if !funding_transaction.input.iter().all(|input| input.sequence == 0xffffffff) && funding_transaction.lock_time < 500_000_000 && funding_transaction.lock_time > height + 2 { + if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 2 { return Err(APIError::APIMisuseError { err: "Funding transaction absolute timelock is non-final".to_owned() }); @@ -2935,89 +2963,6 @@ impl ChannelMana }) } - #[allow(dead_code)] - // Messages of up to 64KB should never end up more than half full with addresses, as that would - // be absurd. We ensure this by checking that at least 100 (our stated public contract on when - // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB - // message... 
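The rewritten funding-finality check above is plain BIP-65 arithmetic: the locktime is ignored when every input's `nSequence` is the maximum value, and locktimes below 500,000,000 denote block heights. The same logic over bare integers (helper name hypothetical; thresholds from the BIPs):

const SEQUENCE_MAX: u32 = 0xffff_ffff;
const LOCK_TIME_THRESHOLD: u32 = 500_000_000; // below: block height; at or above: unix time

fn funding_locktime_is_final(input_sequences: &[u32], lock_time: u32, height: u32) -> bool {
    // If every input opts out of locktime enforcement, the locktime is ignored.
    if input_sequences.iter().all(|&s| s == SEQUENCE_MAX) {
        return true;
    }
    // Time-based locktimes are not compared against the block height here,
    // mirroring the `is_block_height()` guard in the diff.
    if lock_time >= LOCK_TIME_THRESHOLD {
        return true;
    }
    // Allow headroom beyond the next block: mempools evaluate finality at the
    // next block, and the wallet's chain view may be slightly ahead of LDK's.
    lock_time <= height + 2
}

fn main() {
    // Height-locked three blocks ahead of our view: reject as non-final.
    assert!(!funding_locktime_is_final(&[0xffff_fffd], 103, 100));
    // All inputs use the maximum sequence: the locktime is ignored.
    assert!(funding_locktime_is_final(&[SEQUENCE_MAX], 103, 100));
}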
- const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2; - #[deny(const_err)] - #[allow(dead_code)] - // ...by failing to compile if the number of addresses that would be half of a message is - // smaller than 100: - const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 100; - - /// Regenerates channel_announcements and generates a signed node_announcement from the given - /// arguments, providing them in corresponding events via - /// [`get_and_clear_pending_msg_events`], if at least one public channel has been confirmed - /// on-chain. This effectively re-broadcasts all channel announcements and sends our node - /// announcement to ensure that the lightning P2P network is aware of the channels we have and - /// our network addresses. - /// - /// `rgb` is a node "color" and `alias` is a printable human-readable string to describe this - /// node to humans. They carry no in-protocol meaning. - /// - /// `addresses` represent the set (possibly empty) of socket addresses on which this node - /// accepts incoming connections. These will be included in the node_announcement, publicly - /// tying these addresses together and to this node. If you wish to preserve user privacy, - /// addresses should likely contain only Tor Onion addresses. - /// - /// Panics if `addresses` is absurdly large (more than 100). - /// - /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events - pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec) { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - - if addresses.len() > 100 { - panic!("More than half the message size was taken up by public addresses!"); - } - - // While all existing nodes handle unsorted addresses just fine, the spec requires that - // addresses be sorted for future compatibility. - addresses.sort_by_key(|addr| addr.get_id()); - - let announcement = msgs::UnsignedNodeAnnouncement { - features: NodeFeatures::known(), - timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32, - node_id: self.get_our_node_id(), - rgb, alias, addresses, - excess_address_data: Vec::new(), - excess_data: Vec::new(), - }; - let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]); - let node_announce_sig = sign(&self.secp_ctx, &msghash, &self.our_network_key); - - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - - let mut announced_chans = false; - for (_, chan) in channel_state.by_id.iter() { - if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement { - msg, - update_msg: match self.get_channel_update_for_broadcast(chan) { - Ok(msg) => msg, - Err(_) => continue, - }, - }); - announced_chans = true; - } else { - // If the channel is not public or has not yet reached channel_ready, check the - // next channel. If we don't yet have any public channels, we'll skip the broadcast - // below as peers may not accept it without channels on chain first. 
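In the `process_pending_htlc_forwards` hunk below, the pending-forwards map is no longer drained while `channel_state` is held; instead the whole map is swapped out of its dedicated mutex, so the lock is released before the forwarding loop runs. The take-and-drain pattern in isolation:

use std::collections::HashMap;
use std::mem;
use std::sync::Mutex;

fn process_forwards(forward_htlcs: &Mutex<HashMap<u64, Vec<u32>>>) {
    // Swap an empty map in; the temporary `MutexGuard` is dropped at the end
    // of the statement, so the lock is not held during the loop below.
    let mut forwards = HashMap::new();
    mem::swap(&mut forwards, &mut forward_htlcs.lock().unwrap());

    for (short_chan_id, pending) in forwards {
        // Other locks may now be taken per-iteration without creating an
        // ordering requirement against `forward_htlcs`.
        println!("forwarding {} HTLC(s) over SCID {}", pending.len(), short_chan_id);
    }
}

fn main() {
    let forward_htlcs = Mutex::new(HashMap::from([(42u64, vec![1u32, 2])]));
    process_forwards(&forward_htlcs);
    assert!(forward_htlcs.lock().unwrap().is_empty());
}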
- } - } - - if announced_chans { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement { - msg: msgs::NodeAnnouncement { - signature: node_announce_sig, - contents: announcement - }, - }); - } - } - /// Atomically updates the [`ChannelConfig`] for the given channels. /// /// Once the updates are applied, each eligible channel (advertised with a known short channel @@ -3097,10 +3042,12 @@ impl ChannelMana let mut phantom_receives: Vec<(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); let mut handle_errors = Vec::new(); { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; + let mut forward_htlcs = HashMap::new(); + mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap()); - for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() { + for (short_chan_id, mut pending_forwards) in forward_htlcs { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; if short_chan_id != 0 { let forward_chan_id = match channel_state.short_to_chan_info.get(&short_chan_id) { Some((_cp_id, chan_id)) => chan_id.clone(), @@ -3153,7 +3100,7 @@ impl ChannelMana let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode); if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) { let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes(); - let next_hop = match onion_utils::decode_next_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) { + let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) { Ok(res) => res, Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => { let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner(); @@ -3298,9 +3245,12 @@ impl ChannelMana continue; } }; - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); - continue; + match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + ChannelMonitorUpdateStatus::Completed => {}, + e => { + handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true))); + continue; + } } log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}", add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id())); @@ -3498,7 +3448,7 @@ impl ChannelMana } for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) 
{ - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason, destination); + self.fail_htlc_backwards_internal(htlc_source, &payment_hash, failure_reason, destination); } self.forward_htlcs(&mut phantom_receives); @@ -3569,23 +3519,26 @@ impl ChannelMana }; let ret_err = match res { Ok(Some((update_fee, commitment_signed, monitor_update))) => { - if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { - let (res, drop) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY); - if drop { retain_channel = false; } - res - } else { - pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: Some(update_fee), - commitment_signed, - }, - }); - Ok(()) + match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { + ChannelMonitorUpdateStatus::Completed => { + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get_counterparty_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: Some(update_fee), + commitment_signed, + }, + }); + Ok(()) + }, + e => { + let (res, drop) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY); + if drop { retain_channel = false; } + res + } } }, Ok(None) => Ok(()), @@ -3722,7 +3675,7 @@ impl ChannelMana for htlc_source in timed_out_mpp_htlcs.drain(..) { let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 }; - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver ); + self.fail_htlc_backwards_internal(HTLCSource::PreviousHopData(htlc_source.0.clone()), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() }, receiver ); } for (err, counterparty_node_id) in handle_errors.drain(..) { @@ -3748,15 +3701,16 @@ impl ChannelMana pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let mut channel_state = Some(self.channel_state.lock().unwrap()); - let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash); + let removed_source = { + let mut channel_state = self.channel_state.lock().unwrap(); + channel_state.claimable_htlcs.remove(payment_hash) + }; if let Some((_, mut sources)) = removed_source { for htlc in sources.drain(..) 
{ - if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); } let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array( self.best_block.read().unwrap().height())); - self.fail_htlc_backwards_internal(channel_state.take().unwrap(), + self.fail_htlc_backwards_internal( HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }, HTLCDestination::FailedPayment { payment_hash: *payment_hash }); @@ -3819,72 +3773,31 @@ impl ChannelMana counterparty_node_id: &PublicKey ) { for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) { - match htlc_src { - HTLCSource::PreviousHopData(HTLCPreviousHopData { .. }) => { - let (failure_code, onion_failure_data) = - match self.channel_state.lock().unwrap().by_id.entry(channel_id) { - hash_map::Entry::Occupied(chan_entry) => { - self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get()) - }, - hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) - }; - let channel_state = self.channel_state.lock().unwrap(); + let (failure_code, onion_failure_data) = + match self.channel_state.lock().unwrap().by_id.entry(channel_id) { + hash_map::Entry::Occupied(chan_entry) => { + self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get()) + }, + hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new()) + }; - let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id }; - self.fail_htlc_backwards_internal(channel_state, htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver) - }, - HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. 
} => { - let mut session_priv_bytes = [0; 32]; - session_priv_bytes.copy_from_slice(&session_priv[..]); - let mut outbounds = self.pending_outbound_payments.lock().unwrap(); - if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) { - if payment.get_mut().remove(&session_priv_bytes, Some(&path)) && !payment.get().is_fulfilled() { - let retry = if let Some(payment_params_data) = payment_params { - let path_last_hop = path.last().expect("Outbound payments must have had a valid path"); - Some(RouteParameters { - payment_params: payment_params_data, - final_value_msat: path_last_hop.fee_msat, - final_cltv_expiry_delta: path_last_hop.cltv_expiry_delta, - }) - } else { None }; - let mut pending_events = self.pending_events.lock().unwrap(); - pending_events.push(events::Event::PaymentPathFailed { - payment_id: Some(payment_id), - payment_hash, - rejected_by_dest: false, - network_update: None, - all_paths_failed: payment.get().remaining_parts() == 0, - path: path.clone(), - short_channel_id: None, - retry, - #[cfg(test)] - error_code: None, - #[cfg(test)] - error_data: None, - }); - if payment.get().abandoned() && payment.get().remaining_parts() == 0 { - pending_events.push(events::Event::PaymentFailed { - payment_id, - payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"), - }); - payment.remove(); - } - } - } else { - log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); - } - }, - }; + let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id }; + self.fail_htlc_backwards_internal(htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data }, receiver); } } /// Fails an HTLC backwards to the sender of it to us. - /// Note that while we take a channel_state lock as input, we do *not* assume consistency here. - /// There are several callsites that do stupid things like loop over a list of payment_hashes - /// to fail and take the channel_state lock for each iteration (as we take ownership and may - /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to - /// still-available channels. - fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason, destination: HTLCDestination) { + /// Note that we do not assume that channels corresponding to failed HTLCs are still available. + fn fail_htlc_backwards_internal(&self, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason,destination: HTLCDestination) { + #[cfg(debug_assertions)] + { + // Ensure that the `channel_state` lock is not held when calling this function. + // This ensures that future code doesn't introduce a lock_order requirement for + // `forward_htlcs` to be locked after the `channel_state` lock, which calling this + // function with the `channel_state` locked would. + assert!(self.channel_state.try_lock().is_ok()); + } + //TODO: There is a timing attack here where if a node fails an HTLC back to us they can //identify whether we sent it or not based on the (I presume) very different runtime //between the branches here. 
We should make this async and move it into the forward HTLCs @@ -3923,7 +3836,6 @@ impl ChannelMana log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0)); return; } - mem::drop(channel_state_lock); let mut retry = if let Some(payment_params_data) = payment_params { let path_last_hop = path.last().expect("Outbound payments must have had a valid path"); Some(RouteParameters { @@ -3966,7 +3878,7 @@ impl ChannelMana events::Event::PaymentPathFailed { payment_id: Some(payment_id), payment_hash: payment_hash.clone(), - rejected_by_dest: !payment_retryable, + payment_failed_permanently: !payment_retryable, network_update, all_paths_failed, path: path.clone(), @@ -3994,19 +3906,29 @@ impl ChannelMana // channel here as we apparently can't relay through them anyway. let scid = path.first().unwrap().short_channel_id; retry.as_mut().map(|r| r.payment_params.previously_failed_channels.push(scid)); - events::Event::PaymentPathFailed { - payment_id: Some(payment_id), - payment_hash: payment_hash.clone(), - rejected_by_dest: path.len() == 1, - network_update: None, - all_paths_failed, - path: path.clone(), - short_channel_id: Some(scid), - retry, + + if self.payment_is_probe(payment_hash, &payment_id) { + events::Event::ProbeFailed { + payment_id: payment_id, + payment_hash: payment_hash.clone(), + path: path.clone(), + short_channel_id: Some(scid), + } + } else { + events::Event::PaymentPathFailed { + payment_id: Some(payment_id), + payment_hash: payment_hash.clone(), + payment_failed_permanently: false, + network_update: None, + all_paths_failed, + path: path.clone(), + short_channel_id: Some(scid), + retry, #[cfg(test)] - error_code: Some(*failure_code), + error_code: Some(*failure_code), #[cfg(test)] - error_data: Some(data.clone()), + error_data: Some(data.clone()), + } } } }; @@ -4034,10 +3956,11 @@ impl ChannelMana }; let mut forward_event = None; - if channel_state_lock.forward_htlcs.is_empty() { + let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); + if forward_htlcs.is_empty() { forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS)); } - match channel_state_lock.forward_htlcs.entry(short_channel_id) { + match forward_htlcs.entry(short_channel_id) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }); }, @@ -4045,7 +3968,7 @@ impl ChannelMana entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet })); } } - mem::drop(channel_state_lock); + mem::drop(forward_htlcs); let mut pending_events = self.pending_events.lock().unwrap(); if let Some(time) = forward_event { pending_events.push(events::Event::PendingHTLCsForwardable { @@ -4083,8 +4006,7 @@ impl ChannelMana let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let mut channel_state = Some(self.channel_state.lock().unwrap()); - let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash); + let removed_source = self.channel_state.lock().unwrap().claimable_htlcs.remove(&payment_hash); if let Some((payment_purpose, mut sources)) = removed_source { assert!(!sources.is_empty()); @@ -4102,8 +4024,12 @@ impl ChannelMana let mut claimable_amt_msat = 0; let mut expected_amt_msat = None; let mut valid_mpp = true; + let mut errs = Vec::new(); + let mut claimed_any_htlcs = false; + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; 
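The `#[cfg(debug_assertions)]` block added to `fail_htlc_backwards_internal` above enforces the new lock order at runtime in debug builds. A sketch of the pattern with stand-in types; note `try_lock` is best-effort, since it also fails when another thread legitimately holds the lock, but it reliably catches re-entrant callers in single-threaded tests:

use std::collections::HashMap;
use std::sync::Mutex;

struct Manager {
    channel_state: Mutex<HashMap<[u8; 32], u64>>,
}

impl Manager {
    fn fail_htlc_backwards_internal(&self) {
        #[cfg(debug_assertions)]
        {
            // Assert that `channel_state` is not already held, so no future
            // caller can quietly introduce a `channel_state` -> `forward_htlcs`
            // lock ordering by invoking this function under the lock.
            assert!(self.channel_state.try_lock().is_ok());
        }
        // ... fail the HTLC without ever touching `channel_state` ...
    }
}

fn main() {
    let manager = Manager { channel_state: Mutex::new(HashMap::new()) };
    manager.fail_htlc_backwards_internal(); // passes: the lock is free
}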
for htlc in sources.iter() { - if let None = channel_state.as_ref().unwrap().short_to_chan_info.get(&htlc.prev_hop.short_channel_id) { + if let None = channel_state.short_to_chan_info.get(&htlc.prev_hop.short_channel_id) { valid_mpp = false; break; } @@ -4136,21 +4062,9 @@ impl ChannelMana expected_amt_msat.unwrap(), claimable_amt_msat); return; } - - let mut errs = Vec::new(); - let mut claimed_any_htlcs = false; - for htlc in sources.drain(..) { - if !valid_mpp { - if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); } - let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); - htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array( - self.best_block.read().unwrap().height())); - self.fail_htlc_backwards_internal(channel_state.take().unwrap(), - HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash, - HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data }, - HTLCDestination::FailedPayment { payment_hash } ); - } else { - match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) { + if valid_mpp { + for htlc in sources.drain(..) { + match self.claim_funds_from_hop(&mut channel_state_lock, htlc.prev_hop, payment_preimage) { ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => { if let msgs::ErrorAction::IgnoreError = err.err.action { // We got a temporary failure updating monitor, but will claim the @@ -4170,6 +4084,18 @@ impl ChannelMana } } } + mem::drop(channel_state_lock); + if !valid_mpp { + for htlc in sources.drain(..) { + let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec(); + htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array( + self.best_block.read().unwrap().height())); + self.fail_htlc_backwards_internal( + HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash, + HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data }, + HTLCDestination::FailedPayment { payment_hash } ); + } + } if claimed_any_htlcs { self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed { @@ -4179,10 +4105,7 @@ impl ChannelMana }); } - // Now that we've done the entire above loop in one lock, we can handle any errors - // which were generated. - channel_state.take(); - + // Now we can handle any errors which were generated. for (counterparty_node_id, err) in errs.drain(..) 
{ let res: Result<(), _> = Err(err); let _ = handle_error!(self, res, counterparty_node_id); @@ -4204,15 +4127,18 @@ impl ChannelMana match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) { Ok(msgs_monitor_option) => { if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option { - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug }, - "Failed to update channel monitor with preimage {:?}: {:?}", - payment_preimage, e); - return ClaimFundsFromHop::MonitorUpdateFail( - chan.get().get_counterparty_node_id(), - handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(), - Some(htlc_value_msat) - ); + match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + ChannelMonitorUpdateStatus::Completed => {}, + e => { + log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Debug }, + "Failed to update channel monitor with preimage {:?}: {:?}", + payment_preimage, e); + return ClaimFundsFromHop::MonitorUpdateFail( + chan.get().get_counterparty_node_id(), + handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(), + Some(htlc_value_msat) + ); + } } if let Some((msg, commitment_signed)) = msgs { log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}", @@ -4235,10 +4161,13 @@ impl ChannelMana } }, Err((e, monitor_update)) => { - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info }, - "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}", - payment_preimage, e); + match self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { + ChannelMonitorUpdateStatus::Completed => {}, + e => { + log_given_level!(self.logger, if e == ChannelMonitorUpdateStatus::PermanentFailure { Level::Error } else { Level::Info }, + "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}", + payment_preimage, e); + }, } let counterparty_node_id = chan.get().get_counterparty_node_id(); let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_chan_info, chan.get_mut(), &chan_id); @@ -4345,9 +4274,14 @@ impl ChannelMana // We update the ChannelMonitor on the backward link, after // receiving an offchain preimage event from the forward link (the // event being update_fulfill_htlc). - if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) { + let update_res = self.chain_monitor.update_channel(prev_outpoint, preimage_update); + if update_res != ChannelMonitorUpdateStatus::Completed { + // TODO: This needs to be handled somehow - if we receive a monitor update + // with a preimage we *must* somehow manage to propagate it to the upstream + // channel, or we must have an ability to receive the same event and try + // again on restart. 
log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}", - payment_preimage, e); + payment_preimage, update_res); } // Note that we do *not* set `claimed_htlc` to false here. In fact, this // totally could be a duplicate claim, but we have no way of knowing @@ -4429,7 +4363,7 @@ impl ChannelMana self.finalize_claims(finalized_claims); for failure in pending_failures.drain(..) { let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id: funding_txo.to_channel_id() }; - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver); + self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver); } } @@ -4612,29 +4546,28 @@ impl ChannelMana }; // Because we have exclusive ownership of the channel here we can release the channel_state // lock before watch_channel - if let Err(e) = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) { - match e { - ChannelMonitorUpdateErr::PermanentFailure => { - // Note that we reply with the new channel_id in error messages if we gave up on the - // channel, not the temporary_channel_id. This is compatible with ourselves, but the - // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for - // any messages referencing a previously-closed channel anyway. - // We do not do a force-close here as that would generate a monitor update for - // a monitor that we didn't manage to store (and that we don't care about - we - // don't respond with the funding_signed so the channel can never go on chain). - let (_monitor_update, failed_htlcs) = chan.force_shutdown(true); - assert!(failed_htlcs.is_empty()); - return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id)); - }, - ChannelMonitorUpdateErr::TemporaryFailure => { - // There's no problem signing a counterparty's funding transaction if our monitor - // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't - // accepted payment from yet. We do, however, need to wait to send our channel_ready - // until we have persisted our monitor. - chan.monitor_update_failed(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new()); - channel_ready = None; // Don't send the channel_ready now - }, - } + match self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) { + ChannelMonitorUpdateStatus::Completed => {}, + ChannelMonitorUpdateStatus::PermanentFailure => { + // Note that we reply with the new channel_id in error messages if we gave up on the + // channel, not the temporary_channel_id. This is compatible with ourselves, but the + // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for + // any messages referencing a previously-closed channel anyway. + // We do not propagate the monitor update to the user as it would be for a monitor + // that we didn't manage to store (and that we don't care about - we don't respond + // with the funding_signed so the channel can never go on chain). 
+ let (_monitor_update, failed_htlcs) = chan.force_shutdown(false); + assert!(failed_htlcs.is_empty()); + return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id)); + }, + ChannelMonitorUpdateStatus::InProgress => { + // There's no problem signing a counterparty's funding transaction if our monitor + // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't + // accepted payment from yet. We do, however, need to wait to send our channel_ready + // until we have persisted our monitor. + chan.monitor_updating_paused(false, false, channel_ready.is_some(), Vec::new(), Vec::new(), Vec::new()); + channel_ready = None; // Don't send the channel_ready now + }, } let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; @@ -4681,17 +4614,20 @@ impl ChannelMana Ok(update) => update, Err(e) => try_chan_entry!(self, Err(e), channel_state, chan), }; - if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) { - let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED); - if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { - // We weren't able to watch the channel to begin with, so no updates should be made on - // it. Previously, full_stack_target found an (unreachable) panic when the - // monitor update contained within `shutdown_finish` was applied. - if let Some((ref mut shutdown_finish, _)) = shutdown_finish { - shutdown_finish.0.take(); + match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) { + ChannelMonitorUpdateStatus::Completed => {}, + e => { + let mut res = handle_monitor_update_res!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED); + if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res { + // We weren't able to watch the channel to begin with, so no updates should be made on + // it. Previously, full_stack_target found an (unreachable) panic when the + // monitor update contained within `shutdown_finish` was applied. + if let Some((ref mut shutdown_finish, _)) = shutdown_finish { + shutdown_finish.0.take(); + } } - } - return res + return res + }, } if let Some(msg) = channel_ready { send_channel_ready!(channel_state.short_to_chan_info, channel_state.pending_msg_events, chan.get(), msg); @@ -4765,13 +4701,12 @@ impl ChannelMana // Update the monitor with the shutdown script if necessary. 
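
The two `watch_channel` arms above encode the same three-way decision for the very first monitor write at funding time. Condensed into a free-standing illustration (a hypothetical function, not code from this patch):

use lightning::chain::ChannelMonitorUpdateStatus;

// Returns (send_channel_ready_now, abandon_channel) for the initial monitor
// write at funding time, mirroring the two hunks above.
fn funding_store_outcome(status: ChannelMonitorUpdateStatus) -> (bool, bool) {
	match status {
		// Durably stored: proceed and send channel_ready immediately.
		ChannelMonitorUpdateStatus::Completed => (true, false),
		// Write still pending: signing is safe, but channel_ready is
		// withheld until the monitor is persisted.
		ChannelMonitorUpdateStatus::InProgress => (false, false),
		// Storage failed outright: give up on the channel; nothing was
		// broadcast, so no force-close transaction is needed.
		ChannelMonitorUpdateStatus::PermanentFailure => (false, true),
	}
}
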
if let Some(monitor_update) = monitor_update { - if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) { - let (result, is_permanent) = - handle_monitor_err!(self, e, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); - if is_permanent { - remove_channel!(self, channel_state, chan_entry); - break result; - } + let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update); + let (result, is_permanent) = + handle_monitor_update_res!(self, update_res, channel_state.short_to_chan_info, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE); + if is_permanent { + remove_channel!(self, channel_state, chan_entry); + break result; } } @@ -4789,7 +4724,7 @@ impl ChannelMana }; for htlc_source in dropped_htlcs.drain(..) { let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id }; - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); + self.fail_htlc_backwards_internal(htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } let _ = handle_error!(self, result, *counterparty_node_id); @@ -4850,7 +4785,8 @@ impl ChannelMana //encrypted with the same key. It's not immediately obvious how to usefully exploit that, //but we should prevent it anyway. - let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg); + let pending_forward_info = self.decode_update_add_htlc_onion(msg); + let mut channel_state_lock = self.channel_state.lock().unwrap(); let channel_state = &mut *channel_state_lock; match channel_state.by_id.entry(msg.channel_id) { @@ -4959,9 +4895,11 @@ impl ChannelMana }, Ok(res) => res }; - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()); + let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update); + if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some()) { + return Err(e); } + channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK { node_id: counterparty_node_id.clone(), msg: revoke_and_ack, @@ -4990,12 +4928,12 @@ impl ChannelMana for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards { let mut forward_event = None; if !pending_forwards.is_empty() { - let mut channel_state = self.channel_state.lock().unwrap(); - if channel_state.forward_htlcs.is_empty() { + let mut forward_htlcs = self.forward_htlcs.lock().unwrap(); + if forward_htlcs.is_empty() { forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS)) } for (forward_info, prev_htlc_id) in pending_forwards.drain(..) { - match channel_state.forward_htlcs.entry(match forward_info.routing { + match forward_htlcs.entry(match forward_info.routing { PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id, PendingHTLCRouting::Receive { .. } => 0, PendingHTLCRouting::ReceiveKeysend { .. 
} => 0, @@ -5033,26 +4971,27 @@ impl ChannelMana if chan.get().get_counterparty_node_id() != *counterparty_node_id { break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id)); } - let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update(); + let was_paused_for_mon_update = chan.get().is_awaiting_monitor_update(); let raa_updates = break_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan); htlcs_to_fail = raa_updates.holding_cell_failed_htlcs; - if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) { - if was_frozen_for_monitor { - assert!(raa_updates.commitment_update.is_none()); - assert!(raa_updates.accepted_htlcs.is_empty()); - assert!(raa_updates.failed_htlcs.is_empty()); - assert!(raa_updates.finalized_claimed_htlcs.is_empty()); - break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned())); - } else { - if let Err(e) = handle_monitor_err!(self, e, channel_state, chan, - RAACommitmentOrder::CommitmentFirst, false, - raa_updates.commitment_update.is_some(), false, - raa_updates.accepted_htlcs, raa_updates.failed_htlcs, - raa_updates.finalized_claimed_htlcs) { - break Err(e); - } else { unreachable!(); } - } + let update_res = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update); + if was_paused_for_mon_update { + assert!(update_res != ChannelMonitorUpdateStatus::Completed); + assert!(raa_updates.commitment_update.is_none()); + assert!(raa_updates.accepted_htlcs.is_empty()); + assert!(raa_updates.failed_htlcs.is_empty()); + assert!(raa_updates.finalized_claimed_htlcs.is_empty()); + break Err(MsgHandleErrInternal::ignore_no_close("Existing pending monitor update prevented responses to RAA".to_owned())); + } + if update_res != ChannelMonitorUpdateStatus::Completed { + if let Err(e) = handle_monitor_update_res!(self, update_res, channel_state, chan, + RAACommitmentOrder::CommitmentFirst, false, + raa_updates.commitment_update.is_some(), false, + raa_updates.accepted_htlcs, raa_updates.failed_htlcs, + raa_updates.finalized_claimed_htlcs) { + break Err(e); + } else { unreachable!(); } } if let Some(updates) = raa_updates.commitment_update { channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { @@ -5076,7 +5015,7 @@ impl ChannelMana { for failure in pending_failures.drain(..) 
{ let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: channel_outpoint.to_channel_id() }; - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2, receiver); + self.fail_htlc_backwards_internal(failure.0, &failure.1, failure.2, receiver); } self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]); self.finalize_claims(finalized_claim_htlcs); @@ -5154,6 +5093,7 @@ impl ChannelMana if were_node_one == msg_from_node_one { return Ok(NotifyOption::SkipPersist); } else { + log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id)); try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan); } }, @@ -5233,7 +5173,7 @@ impl ChannelMana } else { log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); + self.fail_htlc_backwards_internal(htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } }, MonitorEvent::CommitmentTxConfirmed(funding_outpoint) | @@ -5264,7 +5204,7 @@ impl ChannelMana }); } }, - MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => { + MonitorEvent::Completed { funding_txo, monitor_update_id } => { self.channel_monitor_updated(&funding_txo, monitor_update_id); }, } @@ -5316,16 +5256,19 @@ impl ChannelMana )); } if let Some((commitment_update, monitor_update)) = commitment_opt { - if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { - has_monitor_update = true; - let (res, close_channel) = handle_monitor_err!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY); - handle_errors.push((chan.get_counterparty_node_id(), res)); - if close_channel { return false; } - } else { - pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get_counterparty_node_id(), - updates: commitment_update, - }); + match self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { + ChannelMonitorUpdateStatus::Completed => { + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get_counterparty_node_id(), + updates: commitment_update, + }); + }, + e => { + has_monitor_update = true; + let (res, close_channel) = handle_monitor_update_res!(self, e, short_to_chan_info, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY); + handle_errors.push((chan.get_counterparty_node_id(), res)); + if close_channel { return false; } + }, } } true @@ -5676,10 +5619,6 @@ where /// /// An [`EventHandler`] may safely call back to the provider in order to handle an event. /// However, it must not call [`Writeable::write`] as doing so would result in a deadlock. - /// - /// Pending events are persisted as part of [`ChannelManager`]. While these events are cleared - /// when processed, an [`EventHandler`] must be able to handle previously seen events when - /// restarting from an old state. 
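
Note the rename of `UpdateCompleted` to `MonitorEvent::Completed` in the hunk above: that event is what eventually resumes a channel paused by an `InProgress` status. A hedged sketch of the user-side completion hook, with generics mirroring `ChainMonitor`'s bounds in this era (the function name is illustrative, and the return value of `channel_monitor_updated` is deliberately ignored):

use core::ops::Deref;
use lightning::chain::{self, chainmonitor::{ChainMonitor, MonitorUpdateId, Persist}};
use lightning::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
use lightning::chain::keysinterface::Sign;
use lightning::chain::transaction::OutPoint;
use lightning::util::logger::Logger;

// Called by the user once a monitor write previously reported as InProgress
// becomes durable; the ChainMonitor then surfaces MonitorEvent::Completed,
// which the ChannelManager consumes above via channel_monitor_updated().
fn on_monitor_write_durable<S: Sign, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>(
	chain_monitor: &ChainMonitor<S, C, T, F, L, P>, funding_txo: OutPoint, update_id: MonitorUpdateId,
) where
	C::Target: chain::Filter, T::Target: BroadcasterInterface, F::Target: FeeEstimator,
	L::Target: Logger, P::Target: Persist<S>,
{
	let _ = chain_monitor.channel_monitor_updated(funding_txo, update_id);
}
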
fn process_pending_events(&self, handler: H) where H::Target: EventHandler { PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut result = NotifyOption::SkipPersist; @@ -5798,7 +5737,6 @@ where } } } - max_time!(self.last_node_announcement_serial); max_time!(self.highest_seen_timestamp); let mut payment_secrets = self.pending_inbound_payments.lock().unwrap(); payment_secrets.retain(|_, inbound_payment| { @@ -5972,7 +5910,7 @@ where self.handle_init_event_channel_failures(failed_channels); for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) { - self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason, destination); + self.fail_htlc_backwards_internal(source, &payment_hash, reason, destination); } } @@ -5994,12 +5932,16 @@ where self.persistence_notifier.wait() } + /// Gets a [`Future`] that completes when a persistable update is available. Note that + /// callbacks registered on the [`Future`] MUST NOT call back into this [`ChannelManager`] and + /// should instead register actions to be taken later. + pub fn get_persistable_update_future(&self) -> Future { + self.persistence_notifier.get_future() + } + #[cfg(any(test, feature = "_test_utils"))] pub fn get_persistence_condvar_value(&self) -> bool { - let mutcond = &self.persistence_notifier.persistence_lock; - let &(ref mtx, _) = mutcond; - let guard = mtx.lock().unwrap(); - *guard + self.persistence_notifier.notify_pending() } /// Gets the latest best block which was connected either via the [`chain::Listen`] or @@ -6144,8 +6086,8 @@ impl &events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != counterparty_node_id, + &events::MessageSendEvent::SendChannelAnnouncement { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true, - &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true, &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true, &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id, &events::MessageSendEvent::HandleError { ref node_id, .. 
} => node_id != counterparty_node_id, @@ -6165,7 +6107,12 @@ impl } } - fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) { + fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) -> Result<(), ()> { + if !init_msg.features.supports_static_remote_key() { + log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting with no_connection_possible", log_pubkey!(counterparty_node_id)); + return Err(()); + } + log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id)); let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); @@ -6188,7 +6135,7 @@ impl let channel_state = &mut *channel_state_lock; let pending_msg_events = &mut channel_state.pending_msg_events; channel_state.by_id.retain(|_, chan| { - if chan.get_counterparty_node_id() == *counterparty_node_id { + let retain = if chan.get_counterparty_node_id() == *counterparty_node_id { if !chan.have_received_message() { // If we created this (outbound) channel while we were disconnected from the // peer we probably failed to send the open_channel message, which is now @@ -6202,9 +6149,21 @@ impl }); true } - } else { true } + } else { true }; + if retain && chan.get_counterparty_node_id() != *counterparty_node_id { + if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) { + if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) { + pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement { + node_id: *counterparty_node_id, + msg, update_msg, + }); + } + } + } + retain }); //TODO: Also re-broadcast announcement_signatures + Ok(()) } fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) { @@ -6239,77 +6198,57 @@ impl let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true); } } -} -/// Used to signal to the ChannelManager persister that the manager needs to be re-persisted to -/// disk/backups, through `await_persistable_update_timeout` and `await_persistable_update`. -struct PersistenceNotifier { - /// Users won't access the persistence_lock directly, but rather wait on its bool using - /// `wait_timeout` and `wait`. 
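
The condvar-based `PersistenceNotifier` whose removal begins here is superseded by `util::wakers::Notifier` (imported near the top of this diff), with `get_persistable_update_future` as the new non-blocking entry point. A sketch of the surviving blocking pattern for persister threads, assuming a hypothetical `write_out` hook:

use core::ops::Deref;
use lightning::chain::{self, chaininterface::{BroadcasterInterface, FeeEstimator}, keysinterface::{KeysInterface, Sign}};
use lightning::ln::channelmanager::ChannelManager;
use lightning::util::logger::Logger;

// Blocking persister loop: await_persistable_update() plays the role of the
// removed PersistenceNotifier::wait(); callers wanting async wake-ups would
// use get_persistable_update_future() instead.
fn persist_forever<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>(
	manager: &ChannelManager<Signer, M, T, K, F, L>,
	write_out: impl Fn(&ChannelManager<Signer, M, T, K, F, L>),
) where
	M::Target: chain::Watch<Signer>, T::Target: BroadcasterInterface,
	K::Target: KeysInterface<Signer = Signer>, F::Target: FeeEstimator, L::Target: Logger,
{
	loop {
		manager.await_persistable_update();
		write_out(manager);
	}
}
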
- persistence_lock: (Mutex, Condvar), -} - -impl PersistenceNotifier { - fn new() -> Self { - Self { - persistence_lock: (Mutex::new(false), Condvar::new()), - } + fn provided_node_features(&self) -> NodeFeatures { + provided_node_features() } - fn wait(&self) { - loop { - let &(ref mtx, ref cvar) = &self.persistence_lock; - let mut guard = mtx.lock().unwrap(); - if *guard { - *guard = false; - return; - } - guard = cvar.wait(guard).unwrap(); - let result = *guard; - if result { - *guard = false; - return - } - } + fn provided_init_features(&self, _their_init_features: &PublicKey) -> InitFeatures { + provided_init_features() } +} - #[cfg(any(test, feature = "std"))] - fn wait_timeout(&self, max_wait: Duration) -> bool { - let current_time = Instant::now(); - loop { - let &(ref mtx, ref cvar) = &self.persistence_lock; - let mut guard = mtx.lock().unwrap(); - if *guard { - *guard = false; - return true; - } - guard = cvar.wait_timeout(guard, max_wait).unwrap().0; - // Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the - // desired wait time has actually passed, and if not then restart the loop with a reduced wait - // time. Note that this logic can be highly simplified through the use of - // `Condvar::wait_while` and `Condvar::wait_timeout_while`, if and when our MSRV is raised to - // 1.42.0. - let elapsed = current_time.elapsed(); - let result = *guard; - if result || elapsed >= max_wait { - *guard = false; - return result; - } - match max_wait.checked_sub(elapsed) { - None => return result, - Some(_) => continue - } - } - } +/// Fetches the set of [`NodeFeatures`] flags which are provided by or required by +/// [`ChannelManager`]. +pub fn provided_node_features() -> NodeFeatures { + provided_init_features().to_context() +} - // Signal to the ChannelManager persister that there are updates necessitating persisting to disk. - fn notify(&self) { - let &(ref persist_mtx, ref cnd) = &self.persistence_lock; - let mut persistence_lock = persist_mtx.lock().unwrap(); - *persistence_lock = true; - mem::drop(persistence_lock); - cnd.notify_all(); - } +/// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by +/// [`ChannelManager`]. +/// +/// Note that the invoice feature flags can vary depending on whether the invoice is a "phantom invoice" +/// or not. Thus, this method is not public. +#[cfg(any(feature = "_test_utils", test))] +pub fn provided_invoice_features() -> InvoiceFeatures { + provided_init_features().to_context() +} + +/// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by +/// [`ChannelManager`]. +pub fn provided_channel_features() -> ChannelFeatures { + provided_init_features().to_context() +} + +/// Fetches the set of [`InitFeatures`] flags which are provided by or required by +/// [`ChannelManager`]. +pub fn provided_init_features() -> InitFeatures { + // Note that if new features are added here which other peers may (eventually) require, we + // should also add the corresponding (optional) bit to the ChannelMessageHandler impl for + // ErroringMessageHandler.
+ let mut features = InitFeatures::empty(); + features.set_data_loss_protect_optional(); + features.set_upfront_shutdown_script_optional(); + features.set_variable_length_onion_required(); + features.set_static_remote_key_required(); + features.set_payment_secret_required(); + features.set_basic_mpp_optional(); + features.set_wumbo_optional(); + features.set_shutdown_any_segwit_optional(); + features.set_channel_type_optional(); + features.set_scid_privacy_optional(); + features.set_zero_conf_optional(); + features } const SERIALIZATION_VERSION: u8 = 1; @@ -6670,29 +6609,37 @@ impl Writeable f best_block.block_hash().write(writer)?; } - let channel_state = self.channel_state.lock().unwrap(); - let mut unfunded_channels = 0; - for (_, channel) in channel_state.by_id.iter() { - if !channel.is_funding_initiated() { - unfunded_channels += 1; + { + // Take the `channel_state` lock only within this scope, to avoid creating a lock order + // that would require taking the `forward_htlcs` lock after `channel_state`. + let channel_state = self.channel_state.lock().unwrap(); + let mut unfunded_channels = 0; + for (_, channel) in channel_state.by_id.iter() { + if !channel.is_funding_initiated() { + unfunded_channels += 1; + } } - } - ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?; - for (_, channel) in channel_state.by_id.iter() { - if channel.is_funding_initiated() { - channel.write(writer)?; + ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?; + for (_, channel) in channel_state.by_id.iter() { + if channel.is_funding_initiated() { + channel.write(writer)?; + } } } - (channel_state.forward_htlcs.len() as u64).write(writer)?; - for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() { - short_channel_id.write(writer)?; - (pending_forwards.len() as u64).write(writer)?; - for forward in pending_forwards { - forward.write(writer)?; + { + let forward_htlcs = self.forward_htlcs.lock().unwrap(); + (forward_htlcs.len() as u64).write(writer)?; + for (short_channel_id, pending_forwards) in forward_htlcs.iter() { + short_channel_id.write(writer)?; + (pending_forwards.len() as u64).write(writer)?; + for forward in pending_forwards { + forward.write(writer)?; + } } } + let channel_state = self.channel_state.lock().unwrap(); let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new(); (channel_state.claimable_htlcs.len() as u64).write(writer)?; for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() { @@ -6732,7 +6679,10 @@ impl Writeable f } } - (self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?; + // Prior to 0.0.111 we tracked node_announcement serials here; that now happens in + // `PeerManager`, so we simply write the `highest_seen_timestamp` twice, which is + // likely to be identical.
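
The comment above describes a common forward-compatibility trick: a retired field's slot keeps receiving a placeholder so that older readers still parse the stream. Reduced to a toy (struct and method names hypothetical):

use lightning::io;
use lightning::util::ser::{Writeable, Writer};

// Writes highest_seen_timestamp twice: once into the slot that used to hold
// last_node_announcement_serial, once into its own slot.
struct VersionedState { highest_seen_timestamp: u32 }

impl VersionedState {
	fn write_compat<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		self.highest_seen_timestamp.write(w)?; // legacy serial slot
		self.highest_seen_timestamp.write(w)   // the field's own slot
	}
}
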
+ (self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?; (self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?; (pending_inbound_payments.len() as u64).write(writer)?; @@ -7051,7 +7001,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> } } - let last_node_announcement_serial: u32 = Readable::read(reader)?; + let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111 let highest_seen_timestamp: u32 = Readable::read(reader)?; let pending_inbound_payment_count: u64 = Readable::read(reader)?; @@ -7294,7 +7244,6 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> channel_state: Mutex::new(ChannelHolder { by_id, short_to_chan_info, - forward_htlcs, claimable_htlcs, pending_msg_events: Vec::new(), }), @@ -7302,6 +7251,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> pending_inbound_payments: Mutex::new(pending_inbound_payments), pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()), + forward_htlcs: Mutex::new(forward_htlcs), outbound_scid_aliases: Mutex::new(outbound_scid_aliases), id_to_peer: Mutex::new(id_to_peer), fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(), @@ -7312,7 +7262,6 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> our_network_pubkey, secp_ctx, - last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize), highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize), per_peer_state: RwLock::new(per_peer_state), @@ -7320,7 +7269,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> pending_events: Mutex::new(pending_events_read), pending_background_events: Mutex::new(pending_background_events_read), total_consistency_lock: RwLock::new(()), - persistence_notifier: PersistenceNotifier::new(), + persistence_notifier: Notifier::new(), keys_manager: args.keys_manager, logger: args.logger, @@ -7330,7 +7279,7 @@ impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> for htlc_source in failed_htlcs.drain(..) 
{ let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; - channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); + channel_manager.fail_htlc_backwards_internal(source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() }, receiver); } //TODO: Broadcast channel update for closed channels, but only after we've made a @@ -7347,9 +7296,7 @@ mod tests { use core::time::Duration; use core::sync::atomic::Ordering; use ln::{PaymentPreimage, PaymentHash, PaymentSecret}; - use ln::channelmanager::{PaymentId, PaymentSendFailure}; - use ln::channelmanager::inbound_payment; - use ln::features::InitFeatures; + use ln::channelmanager::{self, inbound_payment, PaymentId, PaymentSendFailure}; use ln::functional_test_utils::*; use ln::msgs; use ln::msgs::ChannelMessageHandler; @@ -7359,54 +7306,6 @@ mod tests { use util::test_utils; use chain::keysinterface::KeysInterface; - #[cfg(feature = "std")] - #[test] - fn test_wait_timeout() { - use ln::channelmanager::PersistenceNotifier; - use sync::Arc; - use core::sync::atomic::AtomicBool; - use std::thread; - - let persistence_notifier = Arc::new(PersistenceNotifier::new()); - let thread_notifier = Arc::clone(&persistence_notifier); - - let exit_thread = Arc::new(AtomicBool::new(false)); - let exit_thread_clone = exit_thread.clone(); - thread::spawn(move || { - loop { - let &(ref persist_mtx, ref cnd) = &thread_notifier.persistence_lock; - let mut persistence_lock = persist_mtx.lock().unwrap(); - *persistence_lock = true; - cnd.notify_all(); - - if exit_thread_clone.load(Ordering::SeqCst) { - break - } - } - }); - - // Check that we can block indefinitely until updates are available. - let _ = persistence_notifier.wait(); - - // Check that the PersistenceNotifier will return after the given duration if updates are - // available. - loop { - if persistence_notifier.wait_timeout(Duration::from_millis(100)) { - break - } - } - - exit_thread.store(true, Ordering::SeqCst); - - // Check that the PersistenceNotifier will return after the given duration even if no updates - // are available. 
- loop { - if !persistence_notifier.wait_timeout(Duration::from_millis(100)) { - break - } - } - } - #[test] fn test_notify_limits() { // Check that a few cases which don't require the persistence of a new ChannelManager, @@ -7422,7 +7321,7 @@ mod tests { assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1))); assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1))); - let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()); // We check that the channel info the nodes have doesn't change too early, even though we try // to connect messages with new values @@ -7493,7 +7392,7 @@ mod tests { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()); // First, send a partial MPP payment. let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000); @@ -7611,7 +7510,7 @@ mod tests { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()); let scorer = test_utils::TestScorer::with_penalty(0); let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes(); @@ -7709,10 +7608,10 @@ mod tests { let payer_pubkey = nodes[0].node.get_our_node_id(); let payee_pubkey = nodes[1].node.get_our_node_id(); - nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None }); - nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None }); + nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap(); - let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known()); + let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features()); let route_params = RouteParameters { payment_params: PaymentParameters::for_keysend(payee_pubkey), final_value_msat: 10000, @@ -7753,10 +7652,10 @@ mod tests { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let payer_pubkey = nodes[0].node.get_our_node_id(); let payee_pubkey = nodes[1].node.get_our_node_id(); - nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None }); - nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None }); + nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { 
features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap(); + nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap(); - let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known()); + let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], channelmanager::provided_init_features(), channelmanager::provided_init_features()); let route_params = RouteParameters { payment_params: PaymentParameters::for_keysend(payee_pubkey), final_value_msat: 10000, @@ -7795,10 +7694,10 @@ mod tests { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let nodes = create_network(4, &node_cfgs, &node_chanmgrs); - let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; - let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id; + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id; + let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id; + let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id; + let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, channelmanager::provided_init_features(), channelmanager::provided_init_features()).0.contents.short_channel_id; // Marshall an MPP route. 
let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); @@ -7859,9 +7758,9 @@ mod tests { nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap(); let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), InitFeatures::known(), &open_channel); + nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), channelmanager::provided_init_features(), &open_channel); let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), InitFeatures::known(), &accept_channel); + nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), channelmanager::provided_init_features(), &accept_channel); let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); let channel_id = &tx.txid().into_inner(); @@ -7906,9 +7805,9 @@ mod tests { update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update); nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap(); - nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &InitFeatures::known(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())); + nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &channelmanager::provided_init_features(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())); let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &InitFeatures::known(), &nodes_1_shutdown); + nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &channelmanager::provided_init_features(), &nodes_1_shutdown); let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0); @@ -7966,7 +7865,7 @@ pub mod bench { use chain::Listen; use chain::chainmonitor::{ChainMonitor, Persist}; use chain::keysinterface::{KeysManager, KeysInterface, InMemorySigner}; - use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage}; + use ln::channelmanager::{self, BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage}; use ln::features::{InitFeatures, InvoiceFeatures}; use ln::functional_test_utils::*; use ln::msgs::{ChannelMessageHandler, Init}; @@ -7978,7 +7877,7 @@ pub mod bench { use bitcoin::hashes::Hash; use bitcoin::hashes::sha256::Hash as Sha256; - use bitcoin::{Block, BlockHeader, Transaction, TxOut}; + use bitcoin::{Block, BlockHeader, PackedLockTime, Transaction, TxMerkleNode, TxOut}; use sync::{Arc, Mutex}; @@ -8032,15 +7931,15 @@ pub mod bench { }); let node_b_holder = NodeHolder { node: &node_b }; - node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None }); - node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None }); + node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), 
remote_network_address: None }).unwrap(); + node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: channelmanager::provided_init_features(), remote_network_address: None }).unwrap(); node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap(); - node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id())); - node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id())); + node_b.handle_open_channel(&node_a.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id())); + node_a.handle_accept_channel(&node_b.get_our_node_id(), channelmanager::provided_init_features(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id())); let tx; if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) { - tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut { + tx = Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut { value: 8_000_000, script_pubkey: output_script, }]}; node_a.funding_transaction_generated(&temporary_channel_id, &node_b.get_our_node_id(), tx.clone()).unwrap(); @@ -8052,7 +7951,7 @@ pub mod bench { assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]); let block = Block { - header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 }, + header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 }, txdata: vec![tx], }; Listen::block_connected(&node_a, &block, 1); @@ -8080,7 +7979,7 @@ pub mod bench { ($node_a: expr, $node_b: expr) => { let usable_channels = $node_a.list_usable_channels(); let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id()) - .with_features(InvoiceFeatures::known()); + .with_features(channelmanager::provided_invoice_features()); let scorer = test_utils::TestScorer::with_penalty(0); let seed = [3u8; 32]; let keys_manager = KeysManager::new(&seed, 42, 42);
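
The bench hunks above also pick up the rust-bitcoin API migration this commit builds against: `lock_time` becomes the `PackedLockTime` newtype and merkle roots use the dedicated `TxMerkleNode` hash type. Condensed (function names illustrative):

use bitcoin::{PackedLockTime, Script, Transaction, TxMerkleNode, TxOut};
use bitcoin::hashes::Hash;

// lock_time is now the PackedLockTime newtype rather than a bare u32.
fn funding_shaped_tx(value: u64, script_pubkey: Script) -> Transaction {
	Transaction {
		version: 2,
		lock_time: PackedLockTime::ZERO, // was `lock_time: 0`
		input: Vec::new(),
		output: vec![TxOut { value, script_pubkey }],
	}
}

// all_zeros() comes from the bitcoin::hashes::Hash trait.
fn zero_merkle_root() -> TxMerkleNode {
	TxMerkleNode::all_zeros() // was `Default::default()`
}
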