use chain::transaction::{OutPoint, TransactionData};
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
+use ln::{PaymentHash, PaymentPreimage, PaymentSecret};
pub use ln::channel::CounterpartyForwardingInfo;
-use ln::channel::{Channel, ChannelError};
+use ln::channel::{Channel, ChannelError, ChannelUpdateStatus};
use ln::features::{InitFeatures, NodeFeatures};
use routing::router::{Route, RouteHop};
use ln::msgs;
use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, OptionalField};
use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner};
use util::config::UserConfig;
-use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
use util::{byte_utils, events};
use util::ser::{Readable, ReadableArgs, MaybeReadable, Writeable, Writer};
use util::chacha20::{ChaCha20, ChaChaReader};
use util::logger::Logger;
use util::errors::APIError;
-use std::{cmp, mem};
+use core::{cmp, mem};
+use std::cell::RefCell;
use std::collections::{HashMap, hash_map, HashSet};
use std::io::{Cursor, Read};
use std::sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
-use std::sync::atomic::{AtomicUsize, Ordering};
-use std::time::Duration;
+use core::sync::atomic::{AtomicUsize, Ordering};
+use core::time::Duration;
#[cfg(any(test, feature = "allow_wallclock_use"))]
use std::time::Instant;
-use std::ops::Deref;
+use core::ops::Deref;
use bitcoin::hashes::hex::ToHex;
// We hold various information about HTLC relay in the HTLC objects in Channel itself:
}
}
-/// payment_hash type, use to cross-lock hop
-/// (C-not exported) as we just use [u8; 32] directly
-#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
-pub struct PaymentHash(pub [u8;32]);
-/// payment_preimage type, use to route payment between hop
-/// (C-not exported) as we just use [u8; 32] directly
-#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
-pub struct PaymentPreimage(pub [u8;32]);
-/// payment_secret type, use to authenticate sender to the receiver and tie MPP HTLCs together
-/// (C-not exported) as we just use [u8; 32] directly
-#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
-pub struct PaymentSecret(pub [u8;32]);
-
type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
/// Error type returned across the channel_state mutex boundary. When an Err is generated for a
/// Locked *after* channel_state.
pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
+ /// The session_priv bytes of outbound payments which are pending resolution.
+ /// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
+ /// (if the channel has been force-closed), however we track them here to prevent duplicative
+ /// PaymentSent/PaymentFailed events. Specifically, in the case of a duplicative
+ /// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice.
+ /// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s)
+ /// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
+ /// after reloading from disk while replaying blocks against ChannelMonitors.
+ ///
+ /// Locked *after* channel_state.
+ pending_outbound_payments: Mutex<HashSet<[u8; 32]>>,
+
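// An editorial sketch (std-only, hypothetical names) of the deduplication the
// set above provides: the session_priv bytes form a unique per-payment key, and
// HashSet::remove reports whether the entry was present, so only the first
// fulfill/fail for a given payment surfaces a user-visible event.
use std::collections::HashSet;
use std::sync::Mutex;

fn note_payment_resolved(pending: &Mutex<HashSet<[u8; 32]>>, session_priv: [u8; 32]) -> bool {
	// Returns true on the first resolution; false for a duplicate claim/fail.
	pending.lock().unwrap().remove(&session_priv)
}

fn main() {
	let pending = Mutex::new(HashSet::new());
	pending.lock().unwrap().insert([7u8; 32]); // registered at send time
	assert!(note_payment_resolved(&pending, [7u8; 32]));  // first event fires
	assert!(!note_payment_resolved(&pending, [7u8; 32])); // duplicate is suppressed
}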
our_network_key: SecretKey,
our_network_pubkey: PublicKey,
/// Essentially just when we're serializing ourselves out.
/// Taken first everywhere where we are making changes before any other locks.
/// When acquiring this lock in read mode, rather than acquiring it directly, call
- /// `PersistenceNotifierGuard::new(..)` and pass the lock to it, to ensure the PersistenceNotifier
- /// the lock contains sends out a notification when the lock is released.
+ /// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
+ /// PersistenceNotifier the lock contains sends out a notification when the lock is released.
total_consistency_lock: RwLock<()>,
persistence_notifier: PersistenceNotifier,
pub fn height(&self) -> u32 { self.height }
}
+#[derive(Copy, Clone, PartialEq)]
+enum NotifyOption {
+ DoPersist,
+ SkipPersist,
+}
+
/// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
/// desirable to notify any listeners on `await_persistable_update_timeout`/
-/// `await_persistable_update` that new updates are available for persistence. Therefore, this
+/// `await_persistable_update` when new updates are available for persistence. Therefore, this
/// struct is responsible for locking the total consistency lock and, upon going out of scope,
/// sending the aforementioned notification (since the lock being released indicates that the
/// updates are ready for persistence).
-struct PersistenceNotifierGuard<'a> {
+///
+/// We allow callers to either always notify by constructing with `notify_on_drop` or choose to
+/// notify or not based on whether relevant changes have been made, providing a closure to
+/// `optionally_notify` which returns a `NotifyOption`.
+struct PersistenceNotifierGuard<'a, F: Fn() -> NotifyOption> {
persistence_notifier: &'a PersistenceNotifier,
+ should_persist: F,
// We hold onto this result so the lock doesn't get released immediately.
_read_guard: RwLockReadGuard<'a, ()>,
}
-impl<'a> PersistenceNotifierGuard<'a> {
- fn new(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier) -> Self {
+impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
+ fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
+ PersistenceNotifierGuard::optionally_notify(lock, notifier, || -> NotifyOption { NotifyOption::DoPersist })
+ }
+
+ fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
let read_guard = lock.read().unwrap();
- Self {
+ PersistenceNotifierGuard {
persistence_notifier: notifier,
+ should_persist: persist_check,
_read_guard: read_guard,
}
}
}
-impl<'a> Drop for PersistenceNotifierGuard<'a> {
+impl<'a, F: Fn() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
fn drop(&mut self) {
- self.persistence_notifier.notify();
+ if (self.should_persist)() == NotifyOption::DoPersist {
+ self.persistence_notifier.notify();
+ }
}
}
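// A standalone sketch of the notify-on-drop pattern above, using only std types
// (names here are simplified stand-ins, not the LDK API): hold a read lock for
// the scope of the guard, run a caller-provided check on drop, and wake any
// waiters only if persistence is actually needed.
use std::sync::{Condvar, Mutex, RwLock, RwLockReadGuard};

struct Notifier { persist_pending: Mutex<bool>, cv: Condvar }

struct Guard<'a, F: Fn() -> bool> {
	notifier: &'a Notifier,
	should_persist: F,
	_read_guard: RwLockReadGuard<'a, ()>,
}

impl<'a, F: Fn() -> bool> Drop for Guard<'a, F> {
	fn drop(&mut self) {
		if (self.should_persist)() {
			*self.notifier.persist_pending.lock().unwrap() = true;
			self.notifier.cv.notify_all();
		}
	}
}

fn optionally_notify<'a, F: Fn() -> bool>(lock: &'a RwLock<()>, notifier: &'a Notifier, check: F) -> Guard<'a, F> {
	Guard { notifier, should_persist: check, _read_guard: lock.read().unwrap() }
}

fn main() {
	let lock = RwLock::new(());
	let notifier = Notifier { persist_pending: Mutex::new(false), cv: Condvar::new() };
	{
		let _guard = optionally_notify(&lock, &notifier, || true);
		// State changes happen while the read lock is held...
	} // ...and dropping the guard wakes any waiting persister.
	assert!(*notifier.persist_pending.lock().unwrap());
}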
pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
-/// HTLC's CLTV. The current default represents roughly six hours of blocks at six blocks/hour.
+/// HTLC's CLTV. The current default represents roughly seven hours of blocks at six blocks/hour.
///
/// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
///
// i.e. the node we forwarded the payment on to should always have enough room to reliably time out
// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
-pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 6;
+pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6 * 7;
pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
/// Minimum CLTV difference between the current block height and received inbound payments.
/// Invoices generated for payment to us must set their `min_final_cltv_expiry` field to at least
/// this value.
-pub const MIN_FINAL_CLTV_EXPIRY: u32 = HTLC_FAIL_BACK_BUFFER;
+// Note that we fail if exactly HTLC_FAIL_BACK_BUFFER + 1 was used, so we need to add one for
+// any payments to succeed. Further, we don't want payments to fail if a block was found while
+// a payment was being routed, so we add an extra block to be safe.
+pub const MIN_FINAL_CLTV_EXPIRY: u32 = HTLC_FAIL_BACK_BUFFER + 3;
// Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
// ie that if the next-hop peer fails the HTLC within
#[allow(dead_code)]
const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
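// An editorial sketch of the compile-time-assertion idiom these checks rely on:
// under #[deny(const_err)], an unsigned subtraction that would underflow turns
// constant evaluation into a hard build error, so the constant doubles as a
// static assertion. The numbers below are placeholders, not the real buffer
// values.
#[deny(const_err)]
#[allow(dead_code)]
const ASSERT_DELTA_COVERS_BUFFERS: u32 = 42 - 6 - 6 - 6; // compiles only while 42 >= 6 + 6 + 6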
-// Check for ability of an attacker to make us fail on-chain by delaying inbound claim. See
+// Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
// ChannelMonitor::would_broadcast_at_height for a description of why this is needed.
#[deny(const_err)]
#[allow(dead_code)]
/// Note that this means this value is *not* persistent - it can change once during the
/// lifetime of the channel.
pub channel_id: [u8; 32],
+ /// The Channel's funding transaction output, if we've negotiated the funding transaction with
+ /// our counterparty already.
+ ///
+ /// Note that, if this has been set, `channel_id` will be equivalent to
+ /// `funding_txo.unwrap().to_channel_id()`.
+ pub funding_txo: Option<OutPoint>,
/// The position of the funding transaction in the chain. None if the funding transaction has
/// not yet been confirmed and the channel fully opened.
pub short_channel_id: Option<u64>,
/// Note that there are some corner cases not fully handled here, so the actual available
/// inbound capacity may be slightly higher than this.
pub inbound_capacity_msat: u64,
+ /// True if the channel was initiated (and thus funded) by us.
+ pub is_outbound: bool,
+ /// True if the channel is confirmed, funding_locked messages have been exchanged, and the
+ /// channel is not currently being shut down. `funding_locked` message exchange implies the
+ /// required confirmation count has been reached (and we were connected to the peer at some
+ /// point after the funding transaction received enough confirmations).
+ pub is_funding_locked: bool,
/// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
- /// the peer is connected, and (c) no monitor update failure is pending resolution.
- pub is_live: bool,
-
+ /// the peer is connected, (c) no monitor update failure is pending resolution, and (d) the
+ /// channel is not currently negotiating a shutdown.
+ ///
+ /// This is a strict superset of `is_funding_locked`.
+ pub is_usable: bool,
+ /// True if this channel is (or will be) publicly-announced.
+ pub is_public: bool,
/// Information on the fees and requirements that the counterparty requires when forwarding
/// payments to us through this channel.
pub counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
}
}
+/// Returns a tuple of (whether the caller should remove the Channel object from memory, the mapped error to return).
+macro_rules! convert_chan_err {
+ ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
+ match $err {
+ ChannelError::Ignore(msg) => {
+ (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
+ },
+ ChannelError::Close(msg) => {
+ log_trace!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
+ if let Some(short_id) = $channel.get_short_channel_id() {
+ $short_to_id.remove(&short_id);
+ }
+ let shutdown_res = $channel.force_shutdown(true);
+ (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
+ },
+ ChannelError::CloseDelayBroadcast(msg) => {
+				log_error!($self.logger, "Channel {} needs to be shut down, but closing transactions were not broadcast due to {}", log_bytes!($channel_id[..]), msg);
+ if let Some(short_id) = $channel.get_short_channel_id() {
+ $short_to_id.remove(&short_id);
+ }
+ let shutdown_res = $channel.force_shutdown(false);
+ (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $self.get_channel_update(&$channel).ok()))
+ }
+ }
+ }
+}
+
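// A non-macro editorial sketch of the shape convert_chan_err! establishes
// (simplified stand-in types): map a channel error to a
// (should_remove_channel, mapped_error) pair and let each call site decide how
// to actually drop the map entry, as break_chan_entry!/try_chan_entry! below do
// with $entry.remove_entry().
use std::collections::HashMap;

enum ChanError { Ignore(String), Close(String) }

fn convert_err(err: ChanError) -> (bool, String) {
	match err {
		ChanError::Ignore(msg) => (false, msg), // keep the channel
		ChanError::Close(msg) => (true, msg),   // caller removes it
	}
}

fn handle_err(channels: &mut HashMap<[u8; 32], ()>, chan_id: [u8; 32], err: ChanError) -> Result<(), String> {
	let (drop_chan, msg) = convert_err(err);
	if drop_chan {
		channels.remove(&chan_id); // mirrors $entry.remove_entry() in the macros
	}
	Err(msg)
}

fn main() {
	let mut channels = HashMap::new();
	channels.insert([0u8; 32], ());
	let _ = handle_err(&mut channels, [0u8; 32], ChanError::Close("oops".to_owned()));
	assert!(channels.is_empty());
}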
macro_rules! break_chan_entry {
($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
match $res {
Ok(res) => res,
- Err(ChannelError::Ignore(msg)) => {
- break Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
- },
- Err(ChannelError::Close(msg)) => {
- log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
- let (channel_id, mut chan) = $entry.remove_entry();
- if let Some(short_id) = chan.get_short_channel_id() {
- $channel_state.short_to_id.remove(&short_id);
+ Err(e) => {
+ let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+ if drop {
+ $entry.remove_entry();
}
- break Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
- },
- Err(ChannelError::CloseDelayBroadcast(_)) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
+ break Err(res);
+ }
}
}
}
($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
match $res {
Ok(res) => res,
- Err(ChannelError::Ignore(msg)) => {
- return Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $entry.key().clone()))
- },
- Err(ChannelError::Close(msg)) => {
- log_trace!($self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!($entry.key()[..]), msg);
- let (channel_id, mut chan) = $entry.remove_entry();
- if let Some(short_id) = chan.get_short_channel_id() {
- $channel_state.short_to_id.remove(&short_id);
- }
- return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()))
- },
- Err(ChannelError::CloseDelayBroadcast(msg)) => {
- log_error!($self.logger, "Channel {} need to be shutdown but closing transactions not broadcast due to {}", log_bytes!($entry.key()[..]), msg);
- let (channel_id, mut chan) = $entry.remove_entry();
- if let Some(short_id) = chan.get_short_channel_id() {
- $channel_state.short_to_id.remove(&short_id);
+ Err(e) => {
+ let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
+ if drop {
+ $entry.remove_entry();
}
- let shutdown_res = chan.force_shutdown(false);
- return Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, $self.get_channel_update(&chan).ok()))
+ return Err(res);
}
}
}
($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new())
};
- ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
+ ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $chan_id: expr) => {
match $err {
ChannelMonitorUpdateErr::PermanentFailure => {
- log_error!($self.logger, "Closing channel {} due to monitor update PermanentFailure", log_bytes!($entry.key()[..]));
- let (channel_id, mut chan) = $entry.remove_entry();
- if let Some(short_id) = chan.get_short_channel_id() {
- $channel_state.short_to_id.remove(&short_id);
+ log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
+ if let Some(short_id) = $chan.get_short_channel_id() {
+ $short_to_id.remove(&short_id);
}
// TODO: $failed_fails is dropped here, which will cause other channels to hit the
// chain in a confused state! We need to move them into the ChannelMonitor which
// splitting hairs we'd prefer to claim payments that were to us, but we haven't
// given up the preimage yet, so might as well just wait until the payment is
// retried, avoiding the on-chain fees.
- let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), channel_id, chan.force_shutdown(true), $self.get_channel_update(&chan).ok()));
- res
+ let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.force_shutdown(true), $self.get_channel_update(&$chan).ok()));
+ (res, true)
},
ChannelMonitorUpdateErr::TemporaryFailure => {
log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards and {} fails",
- log_bytes!($entry.key()[..]),
+ log_bytes!($chan_id[..]),
if $resend_commitment && $resend_raa {
match $action_type {
RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
if !$resend_raa {
debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
}
- $entry.get_mut().monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
- Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$entry.key()))
+ $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
+ (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
},
}
- }
+ };
+ ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => { {
+ let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $entry.key());
+ if drop {
+ $entry.remove_entry();
+ }
+ res
+ } };
}
macro_rules! return_monitor_err {
}
}
+macro_rules! handle_chan_restoration_locked {
+ ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
+ $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
+ $pending_forwards: expr, $funding_broadcastable: expr, $funding_locked: expr) => { {
+ let mut htlc_forwards = None;
+ let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
+
+ let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
+ let chanmon_update_is_none = chanmon_update.is_none();
+ let res = loop {
+ let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
+ if !forwards.is_empty() {
+ htlc_forwards = Some(($channel_entry.get().get_short_channel_id().expect("We can't have pending forwards before funding confirmation"),
+ $channel_entry.get().get_funding_txo().unwrap(), forwards));
+ }
+
+ if chanmon_update.is_some() {
+ // On reconnect, we, by definition, only resend a funding_locked if there have been
+ // no commitment updates, so the only channel monitor update which could also be
+ // associated with a funding_locked would be the funding_created/funding_signed
+ // monitor update. That monitor update failing implies that we won't send
+ // funding_locked until it's been updated, so we can't have a funding_locked and a
+ // monitor update here (so we don't bother to handle it correctly below).
+ assert!($funding_locked.is_none());
+ // A channel monitor update makes no sense without either a funding_locked or a
+ // commitment update to process after it. Since we can't have a funding_locked, we
+ // only bother to handle the monitor-update + commitment_update case below.
+ assert!($commitment_update.is_some());
+ }
+
+ if let Some(msg) = $funding_locked {
+ // Similar to the above, this implies that we're letting the funding_locked fly
+ // before it should be allowed to.
+ assert!(chanmon_update.is_none());
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
+ node_id: counterparty_node_id,
+ msg,
+ });
+ if let Some(announcement_sigs) = $self.get_announcement_sigs($channel_entry.get()) {
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+ node_id: counterparty_node_id,
+ msg: announcement_sigs,
+ });
+ }
+ $channel_state.short_to_id.insert($channel_entry.get().get_short_channel_id().unwrap(), $channel_entry.get().channel_id());
+ }
+
+ let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
+ if let Some(monitor_update) = chanmon_update {
+ // We only ever broadcast a funding transaction in response to a funding_signed
+ // message and the resulting monitor update. Thus, on channel_reestablish
+ // message handling we can't have a funding transaction to broadcast. When
+			// processing a monitor update whose completion results in a funding broadcast, we
+ // cannot have a second monitor update, thus this case would indicate a bug.
+ assert!(funding_broadcastable.is_none());
+ // Given we were just reconnected or finished updating a channel monitor, the
+ // only case where we can get a new ChannelMonitorUpdate would be if we also
+ // have some commitment updates to send as well.
+ assert!($commitment_update.is_some());
+ if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
+				// channel_reestablish doesn't guarantee the order it returns is sensible
+ // for the messages it returns, but if we're setting what messages to
+ // re-transmit on monitor update success, we need to make sure it is sane.
+ let mut order = $order;
+ if $raa.is_none() {
+ order = RAACommitmentOrder::CommitmentFirst;
+ }
+ break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
+ }
+ }
+
+ macro_rules! handle_cs { () => {
+ if let Some(update) = $commitment_update {
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: counterparty_node_id,
+ updates: update,
+ });
+ }
+ } }
+ macro_rules! handle_raa { () => {
+ if let Some(revoke_and_ack) = $raa {
+ $channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+ node_id: counterparty_node_id,
+ msg: revoke_and_ack,
+ });
+ }
+ } }
+ match $order {
+ RAACommitmentOrder::CommitmentFirst => {
+ handle_cs!();
+ handle_raa!();
+ },
+ RAACommitmentOrder::RevokeAndACKFirst => {
+ handle_raa!();
+ handle_cs!();
+ },
+ }
+ if let Some(tx) = funding_broadcastable {
+ log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+ $self.tx_broadcaster.broadcast_transaction(&tx);
+ }
+ break Ok(());
+ };
+
+ if chanmon_update_is_none {
+ // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
+ // above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
+ // should *never* end up calling back to `chain_monitor.update_channel()`.
+ assert!(res.is_ok());
+ }
+
+ (htlc_forwards, res, counterparty_node_id)
+ } }
+}
+
+macro_rules! post_handle_chan_restoration {
+ ($self: ident, $locked_res: expr) => { {
+ let (htlc_forwards, res, counterparty_node_id) = $locked_res;
+
+ let _ = handle_error!($self, res, counterparty_node_id);
+
+ if let Some(forwards) = htlc_forwards {
+ $self.forward_htlcs(&mut [forwards][..]);
+ }
+ } }
+}
+
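// An editorial sketch of the two-phase shape handle_chan_restoration_locked!
// and post_handle_chan_restoration! share (simplified stand-in types): compute
// and queue messages while the channel lock is held, return plain data, and
// only perform follow-up work that may re-take locks after the guard is gone,
// avoiding lock-order inversions.
use std::sync::Mutex;

struct State { queued_msgs: Vec<String> }

fn phase_one_locked(state: &Mutex<State>) -> Vec<u64> {
	let mut st = state.lock().unwrap();
	st.queued_msgs.push("revoke_and_ack".to_owned());
	vec![42] // e.g. HTLC forwards to process once the lock is released
}

fn phase_two_unlocked(state: &Mutex<State>, forwards: Vec<u64>) {
	for scid in forwards {
		// Safe to lock again: phase one released its guard when it returned.
		state.lock().unwrap().queued_msgs.push(format!("forward via {}", scid));
	}
}

fn main() {
	let state = Mutex::new(State { queued_msgs: Vec::new() });
	let forwards = phase_one_locked(&state);
	phase_two_unlocked(&state, forwards);
	assert_eq!(state.lock().unwrap().queued_msgs.len(), 2);
}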
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
where M::Target: chain::Watch<Signer>,
T::Target: BroadcasterInterface,
pending_msg_events: Vec::new(),
}),
pending_inbound_payments: Mutex::new(HashMap::new()),
+ pending_outbound_payments: Mutex::new(HashSet::new()),
our_network_key: keys_manager.get_node_secret(),
our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret()),
let channel = Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key, channel_value_satoshis, push_msat, user_id, config)?;
let res = channel.get_open_channel(self.genesis_hash.clone());
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
// We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
debug_assert!(&self.total_consistency_lock.try_write().is_err());
let (inbound_capacity_msat, outbound_capacity_msat) = channel.get_inbound_outbound_available_balance_msat();
res.push(ChannelDetails {
channel_id: (*channel_id).clone(),
+ funding_txo: channel.get_funding_txo(),
short_channel_id: channel.get_short_channel_id(),
remote_network_id: channel.get_counterparty_node_id(),
counterparty_features: InitFeatures::empty(),
inbound_capacity_msat,
outbound_capacity_msat,
user_id: channel.get_user_id(),
- is_live: channel.is_live(),
+ is_outbound: channel.is_outbound(),
+ is_funding_locked: channel.is_usable(),
+ is_usable: channel.is_live(),
+ is_public: channel.should_announce(),
counterparty_forwarding_info: channel.counterparty_forwarding_info(),
});
}
/// Gets the list of usable channels, in random order. Useful as an argument to
/// get_route to ensure non-announced channels are used.
///
- /// These are guaranteed to have their is_live value set to true, see the documentation for
- /// ChannelDetails::is_live for more info on exactly what the criteria are.
+ /// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the
+ /// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria
+ /// are.
pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
// Note we use is_live here instead of usable which leads to somewhat confused
		// internal/external nomenclature, but that's OK because that's probably what the user
///
/// May generate a SendShutdown message event on success, which should be relayed.
pub fn close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let (mut failed_htlcs, chan_option) = {
let mut channel_state_lock = self.channel_state.lock().unwrap();
/// Force closes a channel, immediately broadcasting the latest local commitment transaction to
/// the chain and rejecting new HTLCs on the given channel. Fails if channel_id is unknown to the manager.
pub fn force_close_channel(&self, channel_id: &[u8; 32]) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
match self.force_close_channel_with_peer(channel_id, None) {
Ok(counterparty_node_id) => {
self.channel_state.lock().unwrap().pending_msg_events.push(
pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32) -> Result<(), APIError> {
log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
let prng_seed = self.keys_manager.get_secure_random_bytes();
- let session_priv = SecretKey::from_slice(&self.keys_manager.get_secure_random_bytes()[..]).expect("RNG is busted");
+ let session_priv_bytes = self.keys_manager.get_secure_random_bytes();
+ let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
.map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?;
}
let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
+ assert!(self.pending_outbound_payments.lock().unwrap().insert(session_priv_bytes));
let err: Result<(), _> = loop {
let mut channel_lock = self.channel_state.lock().unwrap();
/// Note that this includes RBF or similar transaction replacement strategies - lightning does
/// not currently support replacing a funding transaction on an existing channel. Instead,
/// create a new channel with a conflicting funding transaction.
+ ///
+ /// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], funding_transaction: Transaction) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
for inp in funding_transaction.input.iter() {
if inp.witness.is_empty() {
// be absurd. We ensure this by checking that at least 500 (our stated public contract on when
// broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
// message...
- const HALF_MESSAGE_IS_ADDRS: u32 = ::std::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
+ const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
#[deny(const_err)]
#[allow(dead_code)]
// ...by failing to compile if the number of addresses that would be half of a message is
/// only Tor Onion addresses.
///
/// Panics if addresses is absurdly large (more than 500).
- pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], addresses: Vec<NetAddress>) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<NetAddress>) {
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
if addresses.len() > 500 {
panic!("More than half the message size was taken up by public addresses!");
}
+ // While all existing nodes handle unsorted addresses just fine, the spec requires that
+ // addresses be sorted for future compatibility.
+ addresses.sort_by_key(|addr| addr.get_id());
+
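// An editorial sketch of the sort above (the enum and get_id here are
// simplified stand-ins for msgs::NetAddress and its BOLT-7 type byte):
#[derive(Debug)]
enum Addr { IPv4, IPv6, OnionV2, OnionV3 }

impl Addr {
	fn get_id(&self) -> u8 {
		match self { Addr::IPv4 => 1, Addr::IPv6 => 2, Addr::OnionV2 => 3, Addr::OnionV3 => 4 }
	}
}

fn main() {
	let mut addresses = vec![Addr::OnionV3, Addr::IPv4, Addr::IPv6];
	addresses.sort_by_key(|addr| addr.get_id());
	assert_eq!(addresses.first().unwrap().get_id(), 1); // IPv4 sorts first
}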
let announcement = msgs::UnsignedNodeAnnouncement {
features: NodeFeatures::known(),
timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32,
/// Should only really ever be called in response to a PendingHTLCsForwardable event.
/// Will likely generate further events.
pub fn process_pending_htlc_forwards(&self) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut new_events = Vec::new();
let mut failed_forwards = Vec::new();
},
HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
log_trace!(self.logger, "Failing HTLC back to channel with short id {} after delay", short_chan_id);
- match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet) {
+ match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
Err(e) => {
if let ChannelError::Ignore(msg) = e {
log_trace!(self.logger, "Failed to fail backwards to short_id {}: {}", short_chan_id, msg);
/// BroadcastChannelUpdate events in timer_tick_occurred.
///
/// Expects the caller to have a total_consistency_lock read lock.
- fn process_background_events(&self) {
+ fn process_background_events(&self) -> bool {
let mut background_events = Vec::new();
mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
+ if background_events.is_empty() {
+ return false;
+ }
+
for event in background_events.drain(..) {
match event {
BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
},
}
}
+ true
}
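// A small editorial sketch of the drain-via-swap idiom process_background_events
// uses: swap the shared Vec out under its lock, process the events without
// holding the lock, and report whether any work was done.
use std::sync::Mutex;

fn drain_events(pending: &Mutex<Vec<u32>>) -> bool {
	let mut events = Vec::new();
	std::mem::swap(&mut *pending.lock().unwrap(), &mut events);
	if events.is_empty() { return false; }
	for ev in events { println!("handling event {}", ev); }
	true
}

fn main() {
	let pending = Mutex::new(vec![1, 2, 3]);
	assert!(drain_events(&pending));  // three events handled
	assert!(!drain_events(&pending)); // nothing left; caller can skip persisting
}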
#[cfg(any(test, feature = "_test_utils"))]
///
/// Note that in some rare cases this may generate a `chain::Watch::update_channel` call.
pub fn timer_tick_occurred(&self) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
- self.process_background_events();
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut should_persist = NotifyOption::SkipPersist;
+ if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
- for (_, chan) in channel_state.by_id.iter_mut() {
- if chan.is_disabled_staged() && !chan.is_live() {
- if let Ok(update) = self.get_channel_update(&chan) {
- channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
- });
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ for (_, chan) in channel_state.by_id.iter_mut() {
+ match chan.channel_update_status() {
+ ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
+ ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
+ ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
+ ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
+ ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
+ if let Ok(update) = self.get_channel_update(&chan) {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
+ },
+ ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
+ if let Ok(update) = self.get_channel_update(&chan) {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ should_persist = NotifyOption::DoPersist;
+ chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
+ },
+ _ => {},
}
- chan.to_fresh();
- } else if chan.is_disabled_staged() && chan.is_live() {
- chan.to_fresh();
- } else if chan.is_disabled_marked() {
- chan.to_disabled_staged();
}
- }
+
+ should_persist
+ });
}
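// An editorial sketch (hypothetical names) of the enable/disable hysteresis
// above: a channel must be observed dead (or alive) for two consecutive timer
// ticks before a disabling (or enabling) channel_update is broadcast, so brief
// disconnections don't churn gossip.
#[derive(Copy, Clone, PartialEq, Debug)]
enum UpdateStatus { Enabled, EnabledStaged, Disabled, DisabledStaged }

fn tick(status: UpdateStatus, is_live: bool) -> (UpdateStatus, bool /* broadcast update */) {
	match (status, is_live) {
		(UpdateStatus::Enabled, false) => (UpdateStatus::DisabledStaged, false),
		(UpdateStatus::Disabled, true) => (UpdateStatus::EnabledStaged, false),
		(UpdateStatus::DisabledStaged, true) => (UpdateStatus::Enabled, false),  // recovered in time
		(UpdateStatus::EnabledStaged, false) => (UpdateStatus::Disabled, false), // died again in time
		(UpdateStatus::DisabledStaged, false) => (UpdateStatus::Disabled, true),
		(UpdateStatus::EnabledStaged, true) => (UpdateStatus::Enabled, true),
		(status, _) => (status, false),
	}
}

fn main() {
	// One missed tick does not disable the channel; two consecutive ones do.
	let (status, broadcast) = tick(UpdateStatus::Enabled, false);
	assert_eq!((status, broadcast), (UpdateStatus::DisabledStaged, false));
	let (status, broadcast) = tick(status, false);
	assert_eq!((status, broadcast), (UpdateStatus::Disabled, true));
}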
/// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
/// Returns false if no payment was found to fail backwards, true if the process of failing the
/// HTLC backwards has been started.
pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut channel_state = Some(self.channel_state.lock().unwrap());
let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
self.fail_htlc_backwards_internal(channel_state,
htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
},
- HTLCSource::OutboundRoute { .. } => {
- self.pending_events.lock().unwrap().push(
- events::Event::PaymentFailed {
- payment_hash,
- rejected_by_dest: false,
+ HTLCSource::OutboundRoute { session_priv, .. } => {
+ if {
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes.copy_from_slice(&session_priv[..]);
+ self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
+ } {
+ self.pending_events.lock().unwrap().push(
+ events::Event::PaymentFailed {
+ payment_hash,
+ rejected_by_dest: false,
#[cfg(test)]
- error_code: None,
+ error_code: None,
#[cfg(test)]
- error_data: None,
- }
- )
+ error_data: None,
+ }
+ )
+ } else {
+ log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
+ }
},
};
}
// from block_connected which may run during initialization prior to the chain_monitor
// being fully configured. See the docs for `ChannelManagerReadArgs` for more.
match source {
- HTLCSource::OutboundRoute { ref path, .. } => {
+ HTLCSource::OutboundRoute { ref path, session_priv, .. } => {
+ if {
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes.copy_from_slice(&session_priv[..]);
+ !self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
+ } {
+ log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
+ return;
+ }
log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
mem::drop(channel_state_lock);
match &onion_error {
pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool {
let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut channel_state = Some(self.channel_state.lock().unwrap());
let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage) {
match source {
- HTLCSource::OutboundRoute { .. } => {
+ HTLCSource::OutboundRoute { session_priv, .. } => {
mem::drop(channel_state_lock);
- let mut pending_events = self.pending_events.lock().unwrap();
- pending_events.push(events::Event::PaymentSent {
- payment_preimage
- });
+ if {
+ let mut session_priv_bytes = [0; 32];
+ session_priv_bytes.copy_from_slice(&session_priv[..]);
+ self.pending_outbound_payments.lock().unwrap().remove(&session_priv_bytes)
+ } {
+ let mut pending_events = self.pending_events.lock().unwrap();
+ pending_events.push(events::Event::PaymentSent {
+ payment_preimage
+ });
+ } else {
+ log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
+ }
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
/// 4) once all remote copies are updated, you call this function with the update_id that
/// completed, and once it is the latest the Channel will be re-enabled.
pub fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
-
- let mut close_results = Vec::new();
- let mut htlc_forwards = Vec::new();
- let mut htlc_failures = Vec::new();
- let mut pending_events = Vec::new();
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
- {
+ let (mut pending_failures, chan_restoration_res) = {
let mut channel_lock = self.channel_state.lock().unwrap();
let channel_state = &mut *channel_lock;
- let short_to_id = &mut channel_state.short_to_id;
- let pending_msg_events = &mut channel_state.pending_msg_events;
- let channel = match channel_state.by_id.get_mut(&funding_txo.to_channel_id()) {
- Some(chan) => chan,
- None => return,
+ let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
+ hash_map::Entry::Occupied(chan) => chan,
+ hash_map::Entry::Vacant(_) => return,
};
- if !channel.is_awaiting_monitor_update() || channel.get_latest_monitor_update_id() != highest_applied_update_id {
+ if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
return;
}
- let (raa, commitment_update, order, pending_forwards, mut pending_failures, funding_broadcastable, funding_locked) = channel.monitor_updating_restored(&self.logger);
- if !pending_forwards.is_empty() {
- htlc_forwards.push((channel.get_short_channel_id().expect("We can't have pending forwards before funding confirmation"), funding_txo.clone(), pending_forwards));
- }
- htlc_failures.append(&mut pending_failures);
-
- macro_rules! handle_cs { () => {
- if let Some(update) = commitment_update {
- pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
- node_id: channel.get_counterparty_node_id(),
- updates: update,
- });
- }
- } }
- macro_rules! handle_raa { () => {
- if let Some(revoke_and_ack) = raa {
- pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
- node_id: channel.get_counterparty_node_id(),
- msg: revoke_and_ack,
- });
- }
- } }
- match order {
- RAACommitmentOrder::CommitmentFirst => {
- handle_cs!();
- handle_raa!();
- },
- RAACommitmentOrder::RevokeAndACKFirst => {
- handle_raa!();
- handle_cs!();
- },
- }
- if let Some(tx) = funding_broadcastable {
- self.tx_broadcaster.broadcast_transaction(&tx);
- }
- if let Some(msg) = funding_locked {
- pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
- node_id: channel.get_counterparty_node_id(),
- msg,
- });
- if let Some(announcement_sigs) = self.get_announcement_sigs(channel) {
- pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
- node_id: channel.get_counterparty_node_id(),
- msg: announcement_sigs,
- });
- }
- short_to_id.insert(channel.get_short_channel_id().unwrap(), channel.channel_id());
- }
- }
-
- self.pending_events.lock().unwrap().append(&mut pending_events);
-
- for failure in htlc_failures.drain(..) {
+ let (raa, commitment_update, order, pending_forwards, pending_failures, funding_broadcastable, funding_locked) = channel.get_mut().monitor_updating_restored(&self.logger);
+ (pending_failures, handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, raa, commitment_update, order, None, pending_forwards, funding_broadcastable, funding_locked))
+ };
+ post_handle_chan_restoration!(self, chan_restoration_res);
+ for failure in pending_failures.drain(..) {
self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
}
- self.forward_htlcs(&mut htlc_forwards[..]);
-
- for res in close_results.drain(..) {
- self.finish_force_close_channel(res);
- }
}
fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
}
};
+ log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid());
self.tx_broadcaster.broadcast_transaction(&funding_tx);
Ok(())
}
}
};
if let Some(broadcast_tx) = tx {
- log_trace!(self.logger, "Broadcast onchain {}", log_tx!(broadcast_tx));
+ log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
}
if let Some(chan) = chan_option {
}
fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
- let mut channel_state_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_state_lock;
+ let (htlcs_failed_forward, chan_restoration_res) = {
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
- match channel_state.by_id.entry(msg.channel_id) {
- hash_map::Entry::Occupied(mut chan) => {
- if chan.get().get_counterparty_node_id() != *counterparty_node_id {
- return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
- }
- // Currently, we expect all holding cell update_adds to be dropped on peer
- // disconnect, so Channel's reestablish will never hand us any holding cell
- // freed HTLCs to fail backwards. If in the future we no longer drop pending
- // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
- let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, mut order, shutdown) =
- try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
- if let Some(monitor_update) = monitor_update_opt {
- if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
- // channel_reestablish doesn't guarantee the order it returns is sensical
- // for the messages it returns, but if we're setting what messages to
- // re-transmit on monitor update success, we need to make sure it is sane.
- if revoke_and_ack.is_none() {
- order = RAACommitmentOrder::CommitmentFirst;
- }
- if commitment_update.is_none() {
- order = RAACommitmentOrder::RevokeAndACKFirst;
- }
- return_monitor_err!(self, e, channel_state, chan, order, revoke_and_ack.is_some(), commitment_update.is_some());
- //TODO: Resend the funding_locked if needed once we get the monitor running again
- }
- }
- if let Some(msg) = funding_locked {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
- node_id: counterparty_node_id.clone(),
- msg
- });
- }
- macro_rules! send_raa { () => {
- if let Some(msg) = revoke_and_ack {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
- node_id: counterparty_node_id.clone(),
- msg
- });
+ match channel_state.by_id.entry(msg.channel_id) {
+ hash_map::Entry::Occupied(mut chan) => {
+ if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+ return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
}
- } }
- macro_rules! send_cu { () => {
- if let Some(updates) = commitment_update {
- channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ // Currently, we expect all holding cell update_adds to be dropped on peer
+ // disconnect, so Channel's reestablish will never hand us any holding cell
+ // freed HTLCs to fail backwards. If in the future we no longer drop pending
+ // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
+ let (funding_locked, revoke_and_ack, commitment_update, monitor_update_opt, order, htlcs_failed_forward, shutdown) =
+ try_chan_entry!(self, chan.get_mut().channel_reestablish(msg, &self.logger), channel_state, chan);
+ if let Some(msg) = shutdown {
+ channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
node_id: counterparty_node_id.clone(),
- updates
+ msg,
});
}
- } }
- match order {
- RAACommitmentOrder::RevokeAndACKFirst => {
- send_raa!();
- send_cu!();
- },
- RAACommitmentOrder::CommitmentFirst => {
- send_cu!();
- send_raa!();
- },
- }
- if let Some(msg) = shutdown {
- channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
- node_id: counterparty_node_id.clone(),
- msg,
- });
- }
- Ok(())
- },
- hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
- }
+ (htlcs_failed_forward, handle_chan_restoration_locked!(self, channel_state_lock, channel_state, chan, revoke_and_ack, commitment_update, order, monitor_update_opt, Vec::new(), None, funding_locked))
+ },
+ hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+ }
+ };
+ post_handle_chan_restoration!(self, chan_restoration_res);
+ self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
+ Ok(())
}
/// Begin Update fee process. Allowed only on an outbound channel.
	/// (C-not exported) because it's doc(hidden) anyway
#[doc(hidden)]
pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u32) -> Result<(), APIError> {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let counterparty_node_id;
let err: Result<(), _> = loop {
let mut channel_state_lock = self.channel_state.lock().unwrap();
}
}
- /// Process pending events from the `chain::Watch`.
- fn process_pending_monitor_events(&self) {
+ /// Process pending events from the `chain::Watch`, returning whether any events were processed.
+ fn process_pending_monitor_events(&self) -> bool {
let mut failed_channels = Vec::new();
+ let pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
+ let has_pending_monitor_events = !pending_monitor_events.is_empty();
+ for monitor_event in pending_monitor_events {
+ match monitor_event {
+ MonitorEvent::HTLCEvent(htlc_update) => {
+ if let Some(preimage) = htlc_update.payment_preimage {
+ log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
+ self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
+ } else {
+ log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
+ self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ }
+ },
+ MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
+ let mut channel_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_lock;
+ let by_id = &mut channel_state.by_id;
+ let short_to_id = &mut channel_state.short_to_id;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+ if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
+ if let Some(short_id) = chan.get_short_channel_id() {
+ short_to_id.remove(&short_id);
+ }
+ failed_channels.push(chan.force_shutdown(false));
+ if let Ok(update) = self.get_channel_update(&chan) {
+ pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
+ msg: update
+ });
+ }
+ pending_msg_events.push(events::MessageSendEvent::HandleError {
+ node_id: chan.get_counterparty_node_id(),
+ action: msgs::ErrorAction::SendErrorMessage {
+ msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
+ },
+ });
+ }
+ },
+ }
+ }
+
+ for failure in failed_channels.drain(..) {
+ self.finish_force_close_channel(failure);
+ }
+
+ has_pending_monitor_events
+ }
+
+ /// Check the holding cell in each channel and free any pending HTLCs in them if possible.
+	/// Returns whether there were any updates, such as pending HTLCs being freed or a monitor
+	/// update being applied.
+ ///
+ /// This should only apply to HTLCs which were added to the holding cell because we were
+ /// waiting on a monitor update to finish. In that case, we don't want to free the holding cell
+ /// directly in `channel_monitor_updated` as it may introduce deadlocks calling back into user
+ /// code to inform them of a channel monitor update.
+ fn check_free_holding_cells(&self) -> bool {
+ let mut has_monitor_update = false;
+ let mut failed_htlcs = Vec::new();
+ let mut handle_errors = Vec::new();
{
- for monitor_event in self.chain_monitor.release_pending_monitor_events() {
- match monitor_event {
- MonitorEvent::HTLCEvent(htlc_update) => {
- if let Some(preimage) = htlc_update.payment_preimage {
- log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
- self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage);
- } else {
- log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
+ let mut channel_state_lock = self.channel_state.lock().unwrap();
+ let channel_state = &mut *channel_state_lock;
+ let by_id = &mut channel_state.by_id;
+ let short_to_id = &mut channel_state.short_to_id;
+ let pending_msg_events = &mut channel_state.pending_msg_events;
+
+ by_id.retain(|channel_id, chan| {
+ match chan.maybe_free_holding_cell_htlcs(&self.logger) {
+ Ok((commitment_opt, holding_cell_failed_htlcs)) => {
+ if !holding_cell_failed_htlcs.is_empty() {
+ failed_htlcs.push((holding_cell_failed_htlcs, *channel_id));
}
- },
- MonitorEvent::CommitmentTxBroadcasted(funding_outpoint) => {
- let mut channel_lock = self.channel_state.lock().unwrap();
- let channel_state = &mut *channel_lock;
- let by_id = &mut channel_state.by_id;
- let short_to_id = &mut channel_state.short_to_id;
- let pending_msg_events = &mut channel_state.pending_msg_events;
- if let Some(mut chan) = by_id.remove(&funding_outpoint.to_channel_id()) {
- if let Some(short_id) = chan.get_short_channel_id() {
- short_to_id.remove(&short_id);
- }
- failed_channels.push(chan.force_shutdown(false));
- if let Ok(update) = self.get_channel_update(&chan) {
- pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
- msg: update
+ if let Some((commitment_update, monitor_update)) = commitment_opt {
+ if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
+ has_monitor_update = true;
+ let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), channel_id);
+ handle_errors.push((chan.get_counterparty_node_id(), res));
+ if close_channel { return false; }
+ } else {
+ pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+ node_id: chan.get_counterparty_node_id(),
+ updates: commitment_update,
});
}
- pending_msg_events.push(events::MessageSendEvent::HandleError {
- node_id: chan.get_counterparty_node_id(),
- action: msgs::ErrorAction::SendErrorMessage {
- msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
- },
- });
}
+ true
},
+ Err(e) => {
+ let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
+ handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
+ !close_channel
+ }
}
- }
+ });
}
- for failure in failed_channels.drain(..) {
- self.finish_force_close_channel(failure);
+ let has_update = has_monitor_update || !failed_htlcs.is_empty();
+ for (failures, channel_id) in failed_htlcs.drain(..) {
+ self.fail_holding_cell_htlcs(failures, channel_id);
+ }
+
+ for (counterparty_node_id, err) in handle_errors.drain(..) {
+ let _ = handle_error!(self, err, counterparty_node_id);
}
+
+ has_update
}
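// An editorial sketch of the retain-with-side-effects shape used by
// check_free_holding_cells (simplified stand-in types): walk every channel
// under the lock, collect freed HTLCs into local Vecs, decide per-channel
// whether to keep it, and only act on the collected work after the traversal,
// returning whether anything changed.
use std::collections::HashMap;

fn free_holding_cells(channels: &mut HashMap<u32, Vec<&'static str>>) -> bool {
	let mut freed = Vec::new();
	channels.retain(|chan_id, holding_cell| {
		if !holding_cell.is_empty() {
			freed.push((*chan_id, std::mem::take(holding_cell)));
		}
		true // a real Err(ChannelError::Close) case would return false here
	});
	let has_update = !freed.is_empty();
	for (chan_id, htlcs) in freed {
		println!("channel {}: freed {} holding-cell HTLCs", chan_id, htlcs.len());
	}
	has_update
}

fn main() {
	let mut channels = HashMap::new();
	channels.insert(1, vec!["htlc"]);
	assert!(free_holding_cells(&mut channels));
	assert!(!free_holding_cells(&mut channels)); // nothing left to free
}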
/// Handle a list of channel failures during a block_connected or block_disconnected call,
let payment_secret = PaymentSecret(self.keys_manager.get_secure_random_bytes());
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
match payment_secrets.entry(payment_hash) {
hash_map::Entry::Vacant(e) => {
/// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for
/// in excess of the current time. This should roughly match the expiry time set in the invoice.
/// After this many seconds, we will remove the inbound payment, resulting in any attempts to
- /// pay the invoice failing. The BOLT spec suggests 7,200 secs as a default validity time for
+ /// pay the invoice failing. The BOLT spec suggests 3,600 secs as a default validity time for
/// invoices when no timeout is set.
///
/// Note that we use block header time to time-out pending inbound payments (with some margin
/// If you need exact expiry semantics, you should enforce them upon receipt of
/// [`PaymentReceived`].
///
+ /// Pending inbound payments are stored in memory and in serialized versions of this
+ /// [`ChannelManager`]. If potentially unbounded numbers of inbound payments may exist and
+ /// space is limited, you may wish to rate-limit inbound payment creation.
+ ///
/// May panic if `invoice_expiry_delta_secs` is greater than one year.
///
/// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry`
pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32, user_payment_id: u64) -> Result<PaymentSecret, APIError> {
self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs, user_payment_id)
}
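// Illustrative usage (editor's sketch, not part of the patch): registering an
// externally-derived payment hash. `channel_manager` and `payment_hash` are
// assumed to be in scope; the argument values are arbitrary.
//
//     let payment_secret = channel_manager.create_inbound_payment_for_hash(
//         payment_hash,
//         Some(10_000), // require at least 10,000 msat
//         3600,         // valid for one hour, the BOLT 11 default
//         42,           // user_payment_id, an application-chosen identifier
//     )?;
//
// The returned `PaymentSecret` is what the invoice must carry so the sender can
// include it in its onion payload.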
+
+ #[cfg(any(test, feature = "fuzztarget", feature = "_test_utils"))]
+ pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
+ let events = RefCell::new(Vec::new());
+ let event_handler = |event| events.borrow_mut().push(event);
+ self.process_pending_events(&event_handler);
+ events.into_inner()
+ }
}
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
L::Target: Logger,
{
fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
- //TODO: This behavior should be documented. It's non-intuitive that we query
- // ChannelMonitors when clearing other events.
- self.process_pending_monitor_events();
+ let events = RefCell::new(Vec::new());
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut result = NotifyOption::SkipPersist;
+
+ // TODO: This behavior should be documented. It's unintuitive that we query
+ // ChannelMonitors when clearing other events.
+ if self.process_pending_monitor_events() {
+ result = NotifyOption::DoPersist;
+ }
- let mut ret = Vec::new();
- let mut channel_state = self.channel_state.lock().unwrap();
- mem::swap(&mut ret, &mut channel_state.pending_msg_events);
- ret
+ if self.check_free_holding_cells() {
+ result = NotifyOption::DoPersist;
+ }
+
+ let mut pending_events = Vec::new();
+ let mut channel_state = self.channel_state.lock().unwrap();
+ mem::swap(&mut pending_events, &mut channel_state.pending_msg_events);
+
+ if !pending_events.is_empty() {
+ events.replace(pending_events);
+ }
+
+ result
+ });
+ events.into_inner()
}
}
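// Editor's note: `optionally_notify` is defined earlier in this file and not
// shown in this hunk. The sketch below reflects the assumed contract: the guard
// holds the consistency lock while the closure runs, and wakes
// `await_persistable_update` waiters only if the closure returned
// `NotifyOption::DoPersist`.
//
//     let _guard = PersistenceNotifierGuard::optionally_notify(&lock, &notifier, || {
//         if state_changed { NotifyOption::DoPersist } else { NotifyOption::SkipPersist }
//     });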
impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<Signer, M, T, K, F, L>
- where M::Target: chain::Watch<Signer>,
- T::Target: BroadcasterInterface,
- K::Target: KeysInterface<Signer = Signer>,
- F::Target: FeeEstimator,
- L::Target: Logger,
+where
+ M::Target: chain::Watch<Signer>,
+ T::Target: BroadcasterInterface,
+ K::Target: KeysInterface<Signer = Signer>,
+ F::Target: FeeEstimator,
+ L::Target: Logger,
{
- fn get_and_clear_pending_events(&self) -> Vec<Event> {
- //TODO: This behavior should be documented. It's non-intuitive that we query
- // ChannelMonitors when clearing other events.
- self.process_pending_monitor_events();
+ /// Processes events that must be periodically handled.
+ ///
+ /// An [`EventHandler`] may safely call back to the provider in order to handle an event.
+ /// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
+ ///
+ /// Pending events are persisted as part of [`ChannelManager`]. While these events are cleared
+ /// when processed, an [`EventHandler`] must be able to handle previously seen events when
+ /// restarting from an old state.
+ fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
+ PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
+ let mut result = NotifyOption::SkipPersist;
+
+ // TODO: This behavior should be documented. It's unintuitive that we query
+ // ChannelMonitors when clearing other events.
+ if self.process_pending_monitor_events() {
+ result = NotifyOption::DoPersist;
+ }
- let mut ret = Vec::new();
- let mut pending_events = self.pending_events.lock().unwrap();
- mem::swap(&mut ret, &mut *pending_events);
- ret
+ let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
+ if !pending_events.is_empty() {
+ result = NotifyOption::DoPersist;
+ }
+
+ for event in pending_events.drain(..) {
+ handler.handle_event(event);
+ }
+
+ result
+ });
}
}
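// Illustrative handler (editor's sketch): the test-only helper above drives
// `process_pending_events` with a plain closure, and callers can do the same,
// matching only the variants they care about.
//
//     let event_handler = |event: events::Event| match event {
//         events::Event::PendingHTLCsForwardable { .. } => {
//             // Schedule a later call to process_pending_htlc_forwards().
//         },
//         _ => {
//             // Persist or act on PaymentReceived, PaymentSent, etc.
//         },
//     };
//     channel_manager.process_pending_events(&event_handler);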
}
fn block_disconnected(&self, header: &BlockHeader, height: u32) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let new_height = height - 1;
{
let mut best_block = self.best_block.write().unwrap();
let block_hash = header.block_hash();
log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, &self.logger).map(|a| (a, Vec::new())));
}
let block_hash = header.block_hash();
log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
*self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
}
fn transaction_unconfirmed(&self, txid: &Txid) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
self.do_chain_event(None, |channel| {
if let Some(funding_txo) = channel.get_funding_txo() {
if funding_txo.txid == *txid {
L::Target: Logger,
{
fn handle_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, their_features, msg), *counterparty_node_id);
}
fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, their_features, msg), *counterparty_node_id);
}
fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_funding_created(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_funding_signed(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_funding_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingLocked) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_funding_locked(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, their_features, msg), *counterparty_node_id);
}
fn handle_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_closing_signed(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_update_add_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_update_fulfill_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_update_fail_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_commitment_signed(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_revoke_and_ack(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_update_fee(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_announcement_signatures(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id);
}
fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
}
fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
let mut failed_channels = Vec::new();
- let mut failed_payments = Vec::new();
let mut no_channels_remain = true;
{
let mut channel_state_lock = self.channel_state.lock().unwrap();
log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates", log_pubkey!(counterparty_node_id));
channel_state.by_id.retain(|_, chan| {
if chan.get_counterparty_node_id() == *counterparty_node_id {
- // Note that currently on channel reestablish we assert that there are no
- // holding cell add-HTLCs, so if in the future we stop removing uncommitted HTLCs
- // on peer disconnect here, there will need to be corresponding changes in
- // reestablish logic.
- let failed_adds = chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
- chan.to_disabled_marked();
- if !failed_adds.is_empty() {
- let chan_update = self.get_channel_update(&chan).map(|u| u.encode_with_len()).unwrap(); // Cannot add/recv HTLCs before we have a short_id so unwrap is safe
- failed_payments.push((chan_update, failed_adds));
- }
+ chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
if chan.is_shutdown() {
if let Some(short_id) = chan.get_short_channel_id() {
short_to_id.remove(&short_id);
for failure in failed_channels.drain(..) {
self.finish_force_close_channel(failure);
}
- for (chan_update, mut htlc_sources) in failed_payments {
- for (htlc_source, payment_hash) in htlc_sources.drain(..) {
- self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, HTLCFailReason::Reason { failure_code: 0x1000 | 7, data: chan_update.clone() });
- }
- }
}
fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) {
log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
{
let mut peer_state_lock = self.per_peer_state.write().unwrap();
}
fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
- let _persistence_guard = PersistenceNotifierGuard::new(&self.total_consistency_lock, &self.persistence_notifier);
+ let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
if msg.channel_id == [0; 32] {
for chan in self.list_channels() {
loop {
let &(ref mtx, ref cvar) = &self.persistence_lock;
let mut guard = mtx.lock().unwrap();
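+ // Fast path: a persistable update may have fired before we began waiting;
+ // consume the flag and return immediately instead of blocking on the condvar.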
+ if *guard {
+ *guard = false;
+ return;
+ }
guard = cvar.wait(guard).unwrap();
let result = *guard;
if result {
loop {
let &(ref mtx, ref cvar) = &self.persistence_lock;
let mut guard = mtx.lock().unwrap();
+ if *guard {
+ *guard = false;
+ return true;
+ }
guard = cvar.wait_timeout(guard, max_wait).unwrap().0;
// Due to spurious wakeups that can happen on `wait_timeout`, we need to check whether
// the desired wait time has actually passed; if it has not, we restart the loop,
// waiting only for the remaining time.
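// The elided restart logic can be sketched as follows (editor's assumption:
// `started` is an `Instant` captured before the loop and `max_wait` is mutable):
//
//     let elapsed = started.elapsed();
//     if *guard || elapsed >= max_wait {
//         let result = *guard;
//         *guard = false;
//         return result;
//     }
//     max_wait -= elapsed; // safe: elapsed < max_wait here; retry with the remainder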
pending_payment.write(writer)?;
}
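+ // Write the count of pending outbound payments, then each 32-byte session_priv.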
+ let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
+ (pending_outbound_payments.len() as u64).write(writer)?;
+ for session_priv in pending_outbound_payments.iter() {
+ session_priv.write(writer)?;
+ }
+
Ok(())
}
}
}
}
+ let pending_outbound_payments_count: u64 = Readable::read(reader)?;
+ let mut pending_outbound_payments: HashSet<[u8; 32]> = HashSet::with_capacity(cmp::min(pending_outbound_payments_count as usize, MAX_ALLOC_SIZE/32));
+ for _ in 0..pending_outbound_payments_count {
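+ // session_privs are unique; a duplicate implies corrupt or malicious data,
+ // so fail deserialization outright.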
+ if !pending_outbound_payments.insert(Readable::read(reader)?) {
+ return Err(DecodeError::InvalidValue);
+ }
+ }
+
let mut secp_ctx = Secp256k1::new();
secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
pending_msg_events: Vec::new(),
}),
pending_inbound_payments: Mutex::new(pending_inbound_payments),
+ pending_outbound_payments: Mutex::new(pending_outbound_payments),
our_network_key: args.keys_manager.get_node_secret(),
our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &args.keys_manager.get_node_secret()),
mod tests {
use ln::channelmanager::PersistenceNotifier;
use std::sync::Arc;
- use std::sync::atomic::{AtomicBool, Ordering};
+ use core::sync::atomic::{AtomicBool, Ordering};
use std::thread;
- use std::time::Duration;
+ use core::time::Duration;
#[test]
fn test_wait_timeout() {
use routing::router::get_route;
use util::test_utils;
use util::config::UserConfig;
- use util::events::{Event, EventsProvider, MessageSendEvent, MessageSendEventsProvider};
+ use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;