// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

//! The top-level channel management and payment tracking stuff lives here.
//!
//! The [`ChannelManager`] is the main chunk of logic implementing the lightning protocol and is
//! responsible for tracking which channels are open, HTLCs are in flight and reestablishing those
//! upon reconnect to the relevant peer(s).
//!
//! It does not manage routing logic (see [`Router`] for that) nor does it manage constructing
//! on-chain transactions (it only monitors the chain to watch for any force-closes that might
//! imply it needs to fail HTLCs/payments/channels it manages).

use bitcoin::blockdata::block::BlockHeader;
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::blockdata::constants::genesis_block;
use bitcoin::network::constants::Network;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hash_types::{BlockHash, Txid};

use bitcoin::secp256k1::{SecretKey, PublicKey};
use bitcoin::secp256k1::Secp256k1;
use bitcoin::{LockTime, secp256k1, Sequence};

use crate::chain;
use crate::chain::{Confirm, ChannelMonitorUpdateStatus, Watch, BestBlock};
use crate::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::events;
use crate::events::{Event, EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination, PaymentFailureReason};
// Since this struct is returned in `list_channels` methods, expose it here in case users want to
// construct one themselves.
use crate::ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
use crate::ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, InitFeatures, NodeFeatures};
#[cfg(any(feature = "_test_utils", test))]
use crate::ln::features::InvoiceFeatures;
use crate::routing::gossip::NetworkGraph;
use crate::routing::router::{DefaultRouter, InFlightHtlcs, PaymentParameters, Route, RouteHop, RouteParameters, RoutePath, Router};
use crate::routing::scoring::ProbabilisticScorer;
use crate::ln::msgs;
use crate::ln::onion_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT};
#[cfg(test)]
use crate::ln::outbound_payment;
use crate::ln::outbound_payment::{OutboundPayments, PaymentAttempts, PendingOutboundPayment};
use crate::ln::wire::Encode;
use crate::chain::keysinterface::{EntropySource, KeysManager, NodeSigner, Recipient, SignerProvider, ChannelSigner, WriteableEcdsaChannelSigner};
use crate::util::config::{UserConfig, ChannelConfig};
use crate::util::wakers::{Future, Notifier};
use crate::util::scid_utils::fake_scid;
use crate::util::string::UntrustedString;
use crate::util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
use crate::util::logger::{Level, Logger};
use crate::util::errors::APIError;

use alloc::collections::BTreeMap;

use crate::io;
use crate::prelude::*;
use core::{cmp, mem};
use core::cell::RefCell;
use crate::io::Read;
use crate::sync::{Arc, Mutex, RwLock, RwLockReadGuard, FairRwLock, LockTestExt, LockHeldState};
use core::sync::atomic::{AtomicUsize, Ordering};
use core::time::Duration;
use core::ops::Deref;

// Re-export this for use in the public API.
pub use crate::ln::outbound_payment::{PaymentSendFailure, Retry, RetryableSendFailure, RecipientOnionFields};

// We hold various information about HTLC relay in the HTLC objects in Channel itself:
//
// Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
// forward the HTLC with information it will give back to us when it does so, or if it should Fail
// the HTLC with the relevant message for the Channel to handle giving to the remote peer.
//
// Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the
// Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo
// with it to track where it came from (in case of onwards-forward error), waiting a random delay
// before we forward it.
//
// We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a
// relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use
// to either fail-backwards or fulfill the HTLC backwards along the relevant path).
// Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
// our payment, which we can use to decode errors or inform the user that the payment was sent.

#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) enum PendingHTLCRouting {
	Forward {
		onion_packet: msgs::OnionPacket,
		/// The SCID from the onion that we should forward to. This could be a real SCID or a fake one
		/// generated using `get_fake_scid` from the scid_utils::fake_scid module.
		short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
	},
	Receive {
		payment_data: msgs::FinalOnionHopData,
		incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
		phantom_shared_secret: Option<[u8; 32]>,
	},
	ReceiveKeysend {
		payment_preimage: PaymentPreimage,
		incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
	},
}

#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) struct PendingHTLCInfo {
	pub(super) routing: PendingHTLCRouting,
	pub(super) incoming_shared_secret: [u8; 32],
	payment_hash: PaymentHash,
	/// Amount received
	pub(super) incoming_amt_msat: Option<u64>, // Added in 0.0.113
	/// Sender intended amount to forward or receive (actual amount received
	/// may overshoot this in either case)
	pub(super) outgoing_amt_msat: u64,
	pub(super) outgoing_cltv_value: u32,
}

#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) enum HTLCFailureMsg {
	Relay(msgs::UpdateFailHTLC),
	Malformed(msgs::UpdateFailMalformedHTLC),
}

/// Indicates whether an HTLC should be forwarded (with the relevant forwarding info) or failed
/// back (with the relevant failure message).
#[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
pub(super) enum PendingHTLCStatus {
	Forward(PendingHTLCInfo),
	Fail(HTLCFailureMsg),
}

pub(super) struct PendingAddHTLCInfo {
	pub(super) forward_info: PendingHTLCInfo,

	// These fields are produced in `forward_htlcs()` and consumed in
	// `process_pending_htlc_forwards()` for constructing the
	// `HTLCSource::PreviousHopData` for failed and forwarded
	// HTLCs.
	//
	// Note that this may be an outbound SCID alias for the associated channel.
	prev_short_channel_id: u64,
	prev_htlc_id: u64,
	prev_funding_outpoint: OutPoint,
	prev_user_channel_id: u128,
}

pub(super) enum HTLCForwardInfo {
	AddHTLC(PendingAddHTLCInfo),
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
}

/// Tracks the inbound corresponding to an outbound HTLC
#[derive(Clone, Hash, PartialEq, Eq)]
pub(crate) struct HTLCPreviousHopData {
	// Note that this may be an outbound SCID alias for the associated channel.
	short_channel_id: u64,
	htlc_id: u64,
	incoming_packet_shared_secret: [u8; 32],
	phantom_shared_secret: Option<[u8; 32]>,

	// This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
	// channel with a preimage provided by the forward channel.
	outpoint: OutPoint,
}

enum OnionPayload {
	/// Indicates this incoming onion payload is for the purpose of paying an invoice.
	Invoice {
		/// This is only here for backwards-compatibility in serialization, in the future it can be
		/// removed, breaking clients running 0.0.106 and earlier.
		_legacy_hop_data: Option<msgs::FinalOnionHopData>,
	},
	/// Contains the payer-provided preimage.
	Spontaneous(PaymentPreimage),
}

/// HTLCs that are to us and can be failed/claimed by the user
struct ClaimableHTLC {
	prev_hop: HTLCPreviousHopData,
	cltv_expiry: u32,
	/// The amount (in msats) of this MPP part
	value: u64,
	/// The amount (in msats) that the sender intended to be sent in this MPP
	/// part (used for validating total MPP amount)
	sender_intended_value: u64,
	onion_payload: OnionPayload,
	timer_ticks: u8,
	/// The total value received for a payment (sum of all MPP parts if the payment is a MPP).
	/// Gets set to the amount reported when pushing [`Event::PaymentClaimable`].
	total_value_received: Option<u64>,
	/// The sender intended sum total of all MPP parts specified in the onion
	total_msat: u64,
}

/// A payment identifier used to uniquely identify a payment to LDK.
///
/// This is not exported to bindings users as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct PaymentId(pub [u8; 32]);

impl Writeable for PaymentId {
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		self.0.write(w)
	}
}

impl Readable for PaymentId {
	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
		let buf: [u8; 32] = Readable::read(r)?;
		Ok(PaymentId(buf))
	}
}
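
// A minimal round-trip sketch (hypothetical test module): `PaymentId` serializes as exactly
// its 32 inner bytes, so an encode followed by a read must be lossless.
#[cfg(test)]
mod payment_id_ser_sketch {
	use super::PaymentId;
	use crate::util::ser::{Readable, Writeable};

	#[test]
	fn payment_id_round_trip() {
		let id = PaymentId([42; 32]);
		// `Writeable::encode` is the provided helper which writes into a fresh `Vec<u8>`.
		let encoded = id.encode();
		assert_eq!(&encoded[..], &[42u8; 32][..]); // no length prefix, just the raw array
		let decoded: PaymentId = Readable::read(&mut &encoded[..]).unwrap();
		assert_eq!(decoded, id);
	}
}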

/// An identifier used to uniquely identify an intercepted HTLC to LDK.
///
/// This is not exported to bindings users as we just use [u8; 32] directly
#[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
pub struct InterceptId(pub [u8; 32]);

impl Writeable for InterceptId {
	fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
		self.0.write(w)
	}
}

impl Readable for InterceptId {
	fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
		let buf: [u8; 32] = Readable::read(r)?;
		Ok(InterceptId(buf))
	}
}

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
/// Uniquely describes an HTLC by its source. Just the guaranteed-unique subset of [`HTLCSource`].
pub(crate) enum SentHTLCId {
	PreviousHopData { short_channel_id: u64, htlc_id: u64 },
	OutboundRoute { session_priv: SecretKey },
}
impl SentHTLCId {
	pub(crate) fn from_source(source: &HTLCSource) -> Self {
		match source {
			HTLCSource::PreviousHopData(hop_data) => Self::PreviousHopData {
				short_channel_id: hop_data.short_channel_id,
				htlc_id: hop_data.htlc_id,
			},
			HTLCSource::OutboundRoute { session_priv, .. } =>
				Self::OutboundRoute { session_priv: *session_priv },
		}
	}
}
impl_writeable_tlv_based_enum!(SentHTLCId,
	(0, PreviousHopData) => {
		(0, short_channel_id, required),
		(2, htlc_id, required),
	},
	(2, OutboundRoute) => {
		(0, session_priv, required),
	};
);

/// Tracks the inbound corresponding to an outbound HTLC
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
#[derive(Clone, PartialEq, Eq)]
pub(crate) enum HTLCSource {
	PreviousHopData(HTLCPreviousHopData),
	OutboundRoute {
		path: Vec<RouteHop>,
		session_priv: SecretKey,
		/// Technically we can recalculate this from the route, but we cache it here to avoid
		/// doing a double-pass on route when we get a failure back
		first_hop_htlc_msat: u64,
		payment_id: PaymentId,
	},
}
#[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
impl core::hash::Hash for HTLCSource {
	fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
		match self {
			HTLCSource::PreviousHopData(prev_hop_data) => {
				0u8.hash(hasher);
				prev_hop_data.hash(hasher);
			},
			HTLCSource::OutboundRoute { path, session_priv, payment_id, first_hop_htlc_msat } => {
				1u8.hash(hasher);
				path.hash(hasher);
				session_priv[..].hash(hasher);
				payment_id.hash(hasher);
				first_hop_htlc_msat.hash(hasher);
			},
		}
	}
}
impl HTLCSource {
	#[cfg(not(feature = "grind_signatures"))]
	#[cfg(test)]
	pub fn dummy() -> Self {
		HTLCSource::OutboundRoute {
			path: Vec::new(),
			session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
			first_hop_htlc_msat: 0,
			payment_id: PaymentId([2; 32]),
		}
	}

	#[cfg(debug_assertions)]
	/// Checks whether this HTLCSource could possibly match the given HTLC output in a commitment
	/// transaction. Useful to ensure different datastructures match up.
	pub(crate) fn possibly_matches_output(&self, htlc: &super::chan_utils::HTLCOutputInCommitment) -> bool {
		if let HTLCSource::OutboundRoute { first_hop_htlc_msat, .. } = self {
			*first_hop_htlc_msat == htlc.amount_msat
		} else {
			// There's nothing we can check for forwarded HTLCs
			true
		}
	}
}

struct ReceiveError {
	err_code: u16,
	err_data: Vec<u8>,
	msg: &'static str,
}

/// This enum is used to specify which error data to send to peers when failing back an HTLC
/// using [`ChannelManager::fail_htlc_backwards_with_reason`].
///
/// For more info on failure codes, see <https://github.com/lightning/bolts/blob/master/04-onion-routing.md#failure-messages>.
#[derive(Clone, Copy)]
pub enum FailureCode {
	/// We had a temporary error processing the payment. Useful if no other error codes fit
	/// and you want to indicate that the payer may want to retry.
	TemporaryNodeFailure             = 0x2000 | 2,
	/// We have a required feature which was not in this onion. For example, you may require
	/// some additional metadata that was not provided with this payment.
	RequiredNodeFeatureMissing       = 0x4000 | 0x2000 | 3,
	/// You may wish to use this when a `payment_preimage` is unknown, or the CLTV expiry of
	/// the HTLC is too close to the current block height for safe handling.
	/// Using this failure code in [`ChannelManager::fail_htlc_backwards_with_reason`] is
	/// equivalent to calling [`ChannelManager::fail_htlc_backwards`].
	IncorrectOrUnknownPaymentDetails = 0x4000 | 15,
}
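
// A small sketch (hypothetical test) pinning the bit-flag arithmetic above to the BOLT-4
// wire values: 0x2000 is the NODE flag and 0x4000 the PERM flag, so e.g.
// `TemporaryNodeFailure` is NODE|2 = 0x2002.
#[cfg(test)]
mod failure_code_values_sketch {
	use super::FailureCode;

	#[test]
	fn failure_codes_match_bolt4() {
		assert_eq!(FailureCode::TemporaryNodeFailure as u16, 0x2002);
		assert_eq!(FailureCode::RequiredNodeFeatureMissing as u16, 0x6003);
		assert_eq!(FailureCode::IncorrectOrUnknownPaymentDetails as u16, 0x400f);
	}
}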

type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash, PublicKey, [u8; 32])>);

/// Error type returned across the peer_state mutex boundary. When an Err is generated for a
/// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
/// immediately (ie with no further calls on it made). Thus, this step happens inside a
/// peer_state lock. We then return the set of things that need to be done outside the lock in
/// this struct and call handle_error!() on it.
struct MsgHandleErrInternal {
	err: msgs::LightningError,
	chan_id: Option<([u8; 32], u128)>, // If Some a channel of ours has been closed
	shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
}
impl MsgHandleErrInternal {
	#[inline]
	fn send_err_msg_no_close(err: String, channel_id: [u8; 32]) -> Self {
		Self {
			err: LightningError {
				err: err.clone(),
				action: msgs::ErrorAction::SendErrorMessage {
					msg: msgs::ErrorMessage {
						channel_id,
						data: err
					},
				},
			},
			chan_id: None,
			shutdown_finish: None,
		}
	}
	#[inline]
	fn from_no_close(err: msgs::LightningError) -> Self {
		Self { err, chan_id: None, shutdown_finish: None }
	}
	#[inline]
	fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
		Self {
			err: LightningError {
				err: err.clone(),
				action: msgs::ErrorAction::SendErrorMessage {
					msg: msgs::ErrorMessage {
						channel_id,
						data: err
					},
				},
			},
			chan_id: Some((channel_id, user_channel_id)),
			shutdown_finish: Some((shutdown_res, channel_update)),
		}
	}
	#[inline]
	fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
		Self {
			err: match err {
				ChannelError::Warn(msg) => LightningError {
					err: msg.clone(),
					action: msgs::ErrorAction::SendWarningMessage {
						msg: msgs::WarningMessage {
							channel_id,
							data: msg
						},
						log_level: Level::Warn,
					},
				},
				ChannelError::Ignore(msg) => LightningError {
					err: msg,
					action: msgs::ErrorAction::IgnoreError,
				},
				ChannelError::Close(msg) => LightningError {
					err: msg.clone(),
					action: msgs::ErrorAction::SendErrorMessage {
						msg: msgs::ErrorMessage {
							channel_id,
							data: msg
						},
					},
				},
			},
			chan_id: None,
			shutdown_finish: None,
		}
	}
}

/// We hold back HTLCs we intend to relay for a random interval greater than this (see
/// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited).
/// This provides some limited amount of privacy. Ideally this would range from somewhere like one
/// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
pub(super) const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;

/// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
/// be sent in the order they appear in the return value, however sometimes the order needs to be
/// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
/// they were originally sent). In those cases, this enum is also returned.
#[derive(Clone, PartialEq)]
pub(super) enum RAACommitmentOrder {
	/// Send the CommitmentUpdate messages first
	CommitmentFirst,
	/// Send the RevokeAndACK message first
	RevokeAndACKFirst,
}

/// Information about a payment which is currently being claimed.
struct ClaimingPayment {
	amount_msat: u64,
	payment_purpose: events::PaymentPurpose,
	receiver_node_id: PublicKey,
}
impl_writeable_tlv_based!(ClaimingPayment, {
	(0, amount_msat, required),
	(2, payment_purpose, required),
	(4, receiver_node_id, required),
});

/// Information about claimable or being-claimed payments
struct ClaimablePayments {
	/// Map from payment hash to the payment data and any HTLCs which are to us and can be
	/// failed/claimed by the user.
	///
	/// Note that no consistency guarantees are made about the channels given here actually
	/// existing anymore by the time you go to read them!
	///
	/// When adding to the map, [`Self::pending_claiming_payments`] must also be checked to ensure
	/// we don't get a duplicate payment.
	claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,

	/// Map from payment hash to the payment data for HTLCs which we have begun claiming, but which
	/// are waiting on a [`ChannelMonitorUpdate`] to complete in order to be surfaced to the user
	/// as an [`events::Event::PaymentClaimed`].
	pending_claiming_payments: HashMap<PaymentHash, ClaimingPayment>,
}

/// Events which we process internally but cannot be processed immediately at the generation site
/// for some reason. They are handled in timer_tick_occurred, so may be processed with
/// quite some time lag.
enum BackgroundEvent {
	/// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
	/// commitment transaction.
	ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
}

#[derive(Debug)]
pub(crate) enum MonitorUpdateCompletionAction {
	/// Indicates that a payment ultimately destined for us was claimed and we should emit an
	/// [`events::Event::PaymentClaimed`] to the user if we haven't yet generated such an event for
	/// this payment. Note that this is only best-effort. On restart it's possible such a duplicate
	/// event can be generated.
	PaymentClaimed { payment_hash: PaymentHash },
	/// Indicates an [`events::Event`] should be surfaced to the user.
	EmitEvent { event: events::Event },
}

impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction,
	(0, PaymentClaimed) => { (0, payment_hash, required) },
	(2, EmitEvent) => { (0, event, upgradable_required) },
);

/// State we hold per-peer.
pub(super) struct PeerState<Signer: ChannelSigner> {
	/// `temporary_channel_id` or `channel_id` -> `channel`.
	///
	/// Holds all channels where the peer is the counterparty. Once a channel has been assigned a
	/// `channel_id`, the `temporary_channel_id` key in the map is updated and is replaced by the
	/// `channel_id`.
	pub(super) channel_by_id: HashMap<[u8; 32], Channel<Signer>>,
	/// The latest `InitFeatures` we heard from the peer.
	latest_features: InitFeatures,
	/// Messages to send to the peer - pushed to in the same lock that they are generated in (except
	/// for broadcast messages, where ordering isn't as strict).
	pub(super) pending_msg_events: Vec<MessageSendEvent>,
	/// Map from a specific channel to some action(s) that should be taken when all pending
	/// [`ChannelMonitorUpdate`]s for the channel complete updating.
	///
	/// Note that because we generally only have one entry here a HashMap is pretty overkill. A
	/// BTreeMap currently stores more than ten elements per leaf node, so even up to a few
	/// channels with a peer this will just be one allocation and will amount to a linear list of
	/// channels to walk, avoiding the whole hashing rigmarole.
	///
	/// Note that the channel may no longer exist. For example, if a channel was closed but we
	/// later needed to claim an HTLC which is pending on-chain, we may generate a monitor update
	/// for a missing channel. While a malicious peer could construct a second channel with the
	/// same `temporary_channel_id` (or final `channel_id` in the case of 0conf channels or prior
	/// to funding appearing on-chain), the downstream `ChannelMonitor` set is required to ensure
	/// duplicates do not occur, so such channels should fail without a monitor update completing.
	monitor_update_blocked_actions: BTreeMap<[u8; 32], Vec<MonitorUpdateCompletionAction>>,
	/// The peer is currently connected (i.e. we've seen a
	/// [`ChannelMessageHandler::peer_connected`] and no corresponding
	/// [`ChannelMessageHandler::peer_disconnected`]).
	is_connected: bool,
}

impl<Signer: ChannelSigner> PeerState<Signer> {
	/// Indicates that a peer meets the criteria where we're ok to remove it from our storage.
	/// If true is passed for `require_disconnected`, the function will return false if we haven't
	/// disconnected from the node already, ie. `PeerState::is_connected` is set to `true`.
	fn ok_to_remove(&self, require_disconnected: bool) -> bool {
		if require_disconnected && self.is_connected {
			return false
		}
		self.channel_by_id.is_empty() && self.monitor_update_blocked_actions.is_empty()
	}
}

/// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
/// actually ours and not some duplicate HTLC sent to us by a node along the route.
///
/// For users who don't want to bother doing their own payment preimage storage, we also store that
/// here.
///
/// Note that this struct will be removed entirely soon, in favor of storing no inbound payment data
/// and instead encoding it in the payment secret.
struct PendingInboundPayment {
	/// The payment secret that the sender must use for us to accept this payment
	payment_secret: PaymentSecret,
	/// Time at which this HTLC expires - blocks with a header time above this value will result in
	/// this payment being removed.
	expiry_time: u64,
	/// Arbitrary identifier the user specifies (or not)
	user_payment_id: u64,
	// Other required attributes of the payment, optionally enforced:
	payment_preimage: Option<PaymentPreimage>,
	min_value_msat: Option<u64>,
}

/// [`SimpleArcChannelManager`] is useful when you need a [`ChannelManager`] with a static lifetime, e.g.
/// when you're using `lightning-net-tokio` (since `tokio::spawn` requires parameters with static
/// lifetimes). Other times you can afford a reference, which is more efficient, in which case
/// [`SimpleRefChannelManager`] is the more appropriate type. Defining these type aliases prevents
/// issues such as overly long function definitions. Note that the `ChannelManager` can take any type
/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
/// of [`KeysManager`] and [`DefaultRouter`].
///
/// This is not exported to bindings users as Arcs don't make sense in bindings
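///
/// A hedged usage sketch (`MyChainMonitor`, `MyBroadcaster`, `MyFeeEstimator`, `MyLogger` and
/// `setup_manager` are hypothetical stand-ins for your own types and setup code):
///
/// ```ignore
/// type NodeManager = SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
/// let manager: Arc<NodeManager> = setup_manager();
/// // Every inner reference is an `Arc`, so the manager can move into 'static tasks:
/// tokio::spawn(async move { let _ = manager.list_channels(); });
/// ```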
pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<
	Arc<M>,
	Arc<T>,
	Arc<KeysManager>,
	Arc<KeysManager>,
	Arc<KeysManager>,
	Arc<F>,
	Arc<DefaultRouter<
		Arc<NetworkGraph<Arc<L>>>,
		Arc<L>,
		Arc<Mutex<ProbabilisticScorer<Arc<NetworkGraph<Arc<L>>>, Arc<L>>>>
	>>,
	Arc<L>
>;

/// [`SimpleRefChannelManager`] is a type alias for a ChannelManager reference, and is the reference
/// counterpart to the [`SimpleArcChannelManager`] type alias. Use this type by default when you don't
/// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as
/// usage of lightning-net-tokio (since `tokio::spawn` requires parameters with static lifetimes).
/// But if this is not necessary, using a reference is more efficient. Defining these type aliases
/// prevents issues such as overly long function definitions. Note that the ChannelManager can take any type
/// that implements [`NodeSigner`], [`EntropySource`], and [`SignerProvider`] for its keys manager,
/// or, respectively, [`Router`] for its router, but this type alias chooses the concrete types
/// of [`KeysManager`] and [`DefaultRouter`].
///
/// This is not exported to bindings users as Arcs don't make sense in bindings
pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = ChannelManager<&'a M, &'b T, &'c KeysManager, &'c KeysManager, &'c KeysManager, &'d F, &'e DefaultRouter<&'f NetworkGraph<&'g L>, &'g L, &'h Mutex<ProbabilisticScorer<&'f NetworkGraph<&'g L>, &'g L>>>, &'g L>;

/// A trivial trait which describes any [`ChannelManager`] used in testing.
#[cfg(any(test, feature = "_test_utils"))]
pub trait AChannelManager {
	type Watch: chain::Watch<Self::Signer>;
	type M: Deref<Target = Self::Watch>;
	type Broadcaster: BroadcasterInterface;
	type T: Deref<Target = Self::Broadcaster>;
	type EntropySource: EntropySource;
	type ES: Deref<Target = Self::EntropySource>;
	type NodeSigner: NodeSigner;
	type NS: Deref<Target = Self::NodeSigner>;
	type Signer: WriteableEcdsaChannelSigner;
	type SignerProvider: SignerProvider<Signer = Self::Signer>;
	type SP: Deref<Target = Self::SignerProvider>;
	type FeeEstimator: FeeEstimator;
	type F: Deref<Target = Self::FeeEstimator>;
	type Router: Router;
	type R: Deref<Target = Self::Router>;
	type Logger: Logger;
	type L: Deref<Target = Self::Logger>;
	fn get_cm(&self) -> &ChannelManager<Self::M, Self::T, Self::ES, Self::NS, Self::SP, Self::F, Self::R, Self::L>;
}
#[cfg(any(test, feature = "_test_utils"))]
impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> AChannelManager
for ChannelManager<M, T, ES, NS, SP, F, R, L>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer> + Sized,
	T::Target: BroadcasterInterface + Sized,
	ES::Target: EntropySource + Sized,
	NS::Target: NodeSigner + Sized,
	SP::Target: SignerProvider + Sized,
	F::Target: FeeEstimator + Sized,
	R::Target: Router + Sized,
	L::Target: Logger + Sized,
{
	type Watch = M::Target;
	type M = M;
	type Broadcaster = T::Target;
	type T = T;
	type EntropySource = ES::Target;
	type ES = ES;
	type NodeSigner = NS::Target;
	type NS = NS;
	type Signer = <SP::Target as SignerProvider>::Signer;
	type SignerProvider = SP::Target;
	type SP = SP;
	type FeeEstimator = F::Target;
	type F = F;
	type Router = R::Target;
	type R = R;
	type Logger = L::Target;
	type L = L;
	fn get_cm(&self) -> &ChannelManager<M, T, ES, NS, SP, F, R, L> { self }
}

/// Manager which keeps track of a number of channels and sends messages to the appropriate
/// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
///
/// Implements [`ChannelMessageHandler`], handling the multi-channel parts and passing things through
/// to individual Channels.
///
/// Implements [`Writeable`] to write out all channel state to disk. Implies [`peer_disconnected`] for
/// all peers during write/read (though does not modify this instance, only the instance being
/// serialized). This will result in any channels which have not yet exchanged [`funding_created`] (i.e.,
/// called [`funding_transaction_generated`] for outbound channels) being closed.
///
/// Note that you can be a bit lazier about writing out `ChannelManager` than you can be with
/// [`ChannelMonitor`]. With [`ChannelMonitor`] you MUST write each monitor update out to disk before
/// returning from [`chain::Watch::watch_channel`]/[`update_channel`]. With `ChannelManager`s, writing updates
/// happens out-of-band (and will prevent any other `ChannelManager` operations from occurring during
/// the serialization process). If the deserialized version is out-of-date compared to the
/// [`ChannelMonitor`] passed by reference to [`read`], those channels will be force-closed based on the
/// `ChannelMonitor` state and no funds will be lost (mod on-chain transaction fees).
///
/// Note that the deserializer is only implemented for `(`[`BlockHash`]`, `[`ChannelManager`]`)`, which
/// tells you the last block hash which was connected. You should get the best block tip before using the manager.
/// See [`chain::Listen`] and [`chain::Confirm`] for more details.
///
/// Note that `ChannelManager` is responsible for tracking liveness of its channels and generating
/// [`ChannelUpdate`] messages informing peers that the channel is temporarily disabled. To avoid
/// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
/// offline for a full minute. In order to track this, you must call
/// [`timer_tick_occurred`] roughly once per minute, though it doesn't have to be perfect.
///
/// To avoid trivial DoS issues, `ChannelManager` limits the number of inbound connections and
/// inbound channels without confirmed funding transactions. This may result in nodes which we do
/// not have a channel with being unable to connect to us or open new channels with us if we have
/// many peers with unfunded channels.
///
/// Because it is an indication of trust, inbound channels which we've accepted as 0conf are
/// exempted from the count of unfunded channels. Similarly, outbound channels and connections are
/// never limited. Please ensure you limit the count of such channels yourself.
///
/// Rather than using a plain `ChannelManager`, it is preferable to use either a [`SimpleArcChannelManager`]
/// or a [`SimpleRefChannelManager`], for conciseness. See their documentation for more details, but
/// essentially you should default to using a [`SimpleRefChannelManager`], and use a
/// [`SimpleArcChannelManager`] when you require a `ChannelManager` with a static lifetime, such as when
/// you're using lightning-net-tokio.
///
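/// As a hedged sketch of the once-per-minute timer described above (in practice the
/// `lightning-background-processor` crate drives this call for you):
///
/// ```ignore
/// loop {
///     std::thread::sleep(std::time::Duration::from_secs(60));
///     channel_manager.timer_tick_occurred();
/// }
/// ```
///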
/// [`peer_disconnected`]: msgs::ChannelMessageHandler::peer_disconnected
/// [`funding_created`]: msgs::FundingCreated
/// [`funding_transaction_generated`]: Self::funding_transaction_generated
/// [`BlockHash`]: bitcoin::hash_types::BlockHash
/// [`update_channel`]: chain::Watch::update_channel
/// [`ChannelUpdate`]: msgs::ChannelUpdate
/// [`timer_tick_occurred`]: Self::timer_tick_occurred
/// [`read`]: ReadableArgs::read
//
// Lock order:
// The tree structure below illustrates the lock order requirements for the different locks of the
// `ChannelManager`. Locks can be held at the same time if they are on the same branch in the tree,
// and should then be taken in the order of the lowest to the highest level in the tree.
// Note that locks on different branches shall not be taken at the same time, as doing so will
// create a new lock order for those specific locks in the order they were taken.
//
// Lock order tree:
//
// `total_consistency_lock`
//  |
//  |__`forward_htlcs`
//  |   |
//  |   |__`pending_intercepted_htlcs`
//  |
//  |__`per_peer_state`
//  |   |
//  |   |__`pending_inbound_payments`
//  |       |
//  |       |__`claimable_payments`
//  |       |
//  |       |__`pending_outbound_payments` // This field's struct contains a map of pending outbounds
//  |           |
//  |           |__`peer_state`
//  |               |
//  |               |__`id_to_peer`
//  |               |
//  |               |__`short_to_chan_info`
//  |               |
//  |               |__`outbound_scid_aliases`
//  |               |
//  |               |__`best_block`
//  |               |
//  |               |__`pending_events`
//  |                   |
//  |                   |__`pending_background_events`
//
pub struct ChannelManager<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
where
	M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
	T::Target: BroadcasterInterface,
	ES::Target: EntropySource,
	NS::Target: NodeSigner,
	SP::Target: SignerProvider,
	F::Target: FeeEstimator,
	R::Target: Router,
	L::Target: Logger,
{
	default_configuration: UserConfig,
	genesis_hash: BlockHash,
	fee_estimator: LowerBoundedFeeEstimator<F>,
	chain_monitor: M,
	tx_broadcaster: T,
	#[allow(unused)]
	router: R,

	/// See `ChannelManager` struct-level documentation for lock order requirements.
	#[cfg(test)]
	pub(super) best_block: RwLock<BestBlock>,
	#[cfg(not(test))]
	best_block: RwLock<BestBlock>,
	secp_ctx: Secp256k1<secp256k1::All>,

	/// Storage for PaymentSecrets and any requirements on future inbound payments before we will
	/// expose them to users via a PaymentClaimable event. HTLCs which do not meet the requirements
	/// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed
	/// after we generate a PaymentClaimable upon receipt of all MPP parts or when they time out.
	///
	/// See `ChannelManager` struct-level documentation for lock order requirements.
	pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,

	/// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
	/// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
	/// (if the channel has been force-closed), however we track them here to prevent duplicative
	/// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative
	/// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice.
	/// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s)
	/// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
	/// after reloading from disk while replaying blocks against ChannelMonitors.
	///
	/// See `PendingOutboundPayment` documentation for more info.
	///
	/// See `ChannelManager` struct-level documentation for lock order requirements.
	pending_outbound_payments: OutboundPayments,

	/// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
	///
	/// Note that because we may have an SCID Alias as the key we can have two entries per channel,
	/// though in practice we probably won't be receiving HTLCs for a channel both via the alias
	/// and via the classic SCID.
	///
	/// Note that no consistency guarantees are made about the existence of a channel with the
	/// `short_channel_id` here, nor the `short_channel_id` in the `PendingHTLCInfo`!
	///
	/// See `ChannelManager` struct-level documentation for lock order requirements.
	#[cfg(test)]
	pub(super) forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
	#[cfg(not(test))]
	forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
	/// Storage for HTLCs that have been intercepted and bubbled up to the user. We hold them here
	/// until the user tells us what we should do with them.
	///
	/// See `ChannelManager` struct-level documentation for lock order requirements.
	pending_intercepted_htlcs: Mutex<HashMap<InterceptId, PendingAddHTLCInfo>>,

	/// The sets of payments which are claimable or currently being claimed. See
	/// [`ClaimablePayments`]' individual field docs for more info.
	///
	/// See `ChannelManager` struct-level documentation for lock order requirements.
	claimable_payments: Mutex<ClaimablePayments>,

	/// The set of outbound SCID aliases across all our channels, including unconfirmed channels
	/// and some closed channels which reached a usable state prior to being closed. This is used
	/// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
	/// active channel list on load.
	///
	/// See `ChannelManager` struct-level documentation for lock order requirements.
	outbound_scid_aliases: Mutex<HashSet<u64>>,

	/// `channel_id` -> `counterparty_node_id`.
	///
	/// Only `channel_id`s are allowed as keys in this map, and not `temporary_channel_id`s. As
	/// multiple channels with the same `temporary_channel_id` to different peers can exist,
	/// allowing `temporary_channel_id`s in this map would cause collisions for such channels.
	///
	/// Note that this map should only be used for `MonitorEvent` handling, to be able to access
	/// the corresponding channel for the event, as we only have access to the `channel_id` during
	/// the handling of the events.
	///
	/// Note that no consistency guarantees are made about the existence of a peer with the
	/// `counterparty_node_id` in our other maps.
	///
	/// TODO:
	/// The `counterparty_node_id` isn't passed with `MonitorEvent`s currently. To pass it, we need
	/// to make `counterparty_node_id`'s a required field in `ChannelMonitor`s, which unfortunately
	/// would break backwards compatibility.
	/// We should add `counterparty_node_id`s to `MonitorEvent`s, and eventually rely on it in the
	/// future. That would make this map redundant, as only the `ChannelManager::per_peer_state` is
	/// required to access the channel with the `counterparty_node_id`.
	///
	/// See `ChannelManager` struct-level documentation for lock order requirements.
	id_to_peer: Mutex<HashMap<[u8; 32], PublicKey>>,

	/// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
	///
	/// Outbound SCID aliases are added here once the channel is available for normal use, with
	/// SCIDs being added once the funding transaction is confirmed at the channel's required
	/// confirmation depth.
	///
	/// Note that while this holds `counterparty_node_id`s and `channel_id`s, no consistency
	/// guarantees are made about the existence of a peer with the `counterparty_node_id` nor a
	/// channel with the `channel_id` in our other maps.
	///
	/// See `ChannelManager` struct-level documentation for lock order requirements.
	#[cfg(test)]
	pub(super) short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,
	#[cfg(not(test))]
	short_to_chan_info: FairRwLock<HashMap<u64, (PublicKey, [u8; 32])>>,

	our_network_pubkey: PublicKey,

	inbound_payment_key: inbound_payment::ExpandedKey,

	/// LDK puts the [fake scids] that it generates into namespaces, to identify the type of an
	/// incoming payment. To make it harder for a third-party to identify the type of a payment,
	/// we encrypt the namespace identifier using these bytes.
	///
	/// [fake scids]: crate::util::scid_utils::fake_scid
	fake_scid_rand_bytes: [u8; 32],

	/// When we send payment probes, we generate the [`PaymentHash`] based on this cookie secret
	/// and a random [`PaymentId`]. This allows us to discern probes from real payments, without
	/// keeping additional state.
	probing_cookie_secret: [u8; 32],

	/// The highest block timestamp we've seen, which is usually a good guess at the current time.
	/// Assuming most miners are generating blocks with reasonable timestamps, this shouldn't be
	/// very far in the past, and can only ever be up to two hours in the future.
	highest_seen_timestamp: AtomicUsize,

	/// The bulk of our storage. Currently the `per_peer_state` stores our channels on a per-peer
	/// basis, as well as the peer's latest features.
	///
	/// If we are connected to a peer we always at least have an entry here, even if no channels
	/// are currently open with that peer.
	///
	/// Because adding or removing an entry is rare, we usually take an outer read lock and then
	/// operate on the inner value freely. This opens up for parallel per-peer operation for
	/// channels.
	///
	/// Note that the same thread must never acquire two inner `PeerState` locks at the same time.
	///
	/// See `ChannelManager` struct-level documentation for lock order requirements.
	#[cfg(not(any(test, feature = "_test_utils")))]
	per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>>>,
	#[cfg(any(test, feature = "_test_utils"))]
	pub(super) per_peer_state: FairRwLock<HashMap<PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>>>,

	/// See `ChannelManager` struct-level documentation for lock order requirements.
	pending_events: Mutex<Vec<events::Event>>,
	/// See `ChannelManager` struct-level documentation for lock order requirements.
	pending_background_events: Mutex<Vec<BackgroundEvent>>,
	/// Used when we have to take a BIG lock to make sure everything is self-consistent.
	/// Essentially just when we're serializing ourselves out.
	/// Taken first everywhere where we are making changes before any other locks.
	/// When acquiring this lock in read mode, rather than acquiring it directly, call
	/// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
	/// Notifier the lock contains sends out a notification when the lock is released.
	total_consistency_lock: RwLock<()>,

	persistence_notifier: Notifier,

	entropy_source: ES,
	node_signer: NS,
	signer_provider: SP,

	logger: L,
}

/// Chain-related parameters used to construct a new `ChannelManager`.
///
/// Typically, the block-specific parameters are derived from the best block hash for the network,
/// as a newly constructed `ChannelManager` will not have created any channels yet. These parameters
/// are not needed when deserializing a previously constructed `ChannelManager`.
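///
/// A minimal construction sketch (assuming a brand-new node starting from the testnet genesis
/// block; an existing node would pass its actual best chain tip instead):
///
/// ```ignore
/// let network = Network::Testnet;
/// let params = ChainParameters {
///     network,
///     best_block: BestBlock::new(genesis_block(network).block_hash(), 0),
/// };
/// ```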
#[derive(Clone, Copy, PartialEq)]
pub struct ChainParameters {
	/// The network for determining the `chain_hash` in Lightning messages.
	pub network: Network,

	/// The hash and height of the latest block successfully connected.
	///
	/// Used to track on-chain channel funding outputs and send payments with reliable timelocks.
	pub best_block: BestBlock,
}

#[derive(Copy, Clone, PartialEq)]
enum NotifyOption {
	DoPersist,
	SkipPersist,
}

/// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
/// desirable to notify any listeners on `await_persistable_update_timeout`/
/// `await_persistable_update` when new updates are available for persistence. Therefore, this
/// struct is responsible for locking the total consistency lock and, upon going out of scope,
/// sending the aforementioned notification (since the lock being released indicates that the
/// updates are ready for persistence).
///
/// We allow callers to either always notify by constructing with `notify_on_drop` or choose to
/// notify or not based on whether relevant changes have been made, providing a closure to
/// `optionally_notify` which returns a `NotifyOption`.
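///
/// A hedged usage sketch, mirroring how `ChannelManager` methods in this file use the guard
/// (persistence listeners are notified when `_guard` is dropped):
///
/// ```ignore
/// let _guard = PersistenceNotifierGuard::notify_on_drop(
///     &self.total_consistency_lock, &self.persistence_notifier);
/// // ... make state changes; the notifier fires once `_guard` goes out of scope ...
/// ```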
struct PersistenceNotifierGuard<'a, F: Fn() -> NotifyOption> {
	persistence_notifier: &'a Notifier,
	should_persist: F,
	// We hold onto this result so the lock doesn't get released immediately.
	_read_guard: RwLockReadGuard<'a, ()>,
}

impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
	fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a Notifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
		PersistenceNotifierGuard::optionally_notify(lock, notifier, || -> NotifyOption { NotifyOption::DoPersist })
	}

	fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a Notifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
		let read_guard = lock.read().unwrap();

		PersistenceNotifierGuard {
			persistence_notifier: notifier,
			should_persist: persist_check,
			_read_guard: read_guard,
		}
	}
}

impl<'a, F: Fn() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
	fn drop(&mut self) {
		if (self.should_persist)() == NotifyOption::DoPersist {
			self.persistence_notifier.notify();
		}
	}
}

/// The amount of time in blocks we require our counterparty to wait to claim their money (ie time
/// between when we, or our watchtower, must check for them having broadcast a theft transaction).
///
/// This can be increased (but not decreased) through [`ChannelHandshakeConfig::our_to_self_delay`]
///
/// [`ChannelHandshakeConfig::our_to_self_delay`]: crate::util::config::ChannelHandshakeConfig::our_to_self_delay
pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
/// The amount of time in blocks we're willing to wait to claim money back to us. This matches
/// the maximum required amount in lnd as of March 2021.
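/// (2 * 6 * 24 * 7 = 2016 blocks, i.e. roughly two weeks.)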
pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;

/// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
/// HTLC's CLTV. The current default represents roughly seven hours of blocks at six blocks/hour.
///
/// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
///
/// [`ChannelConfig::cltv_expiry_delta`]: crate::util::config::ChannelConfig::cltv_expiry_delta
// This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
// i.e. the node we forwarded the payment on to should always have enough room to reliably time out
// the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
// CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
// This should be long enough to allow a payment path drawn across multiple routing hops with substantial
// `cltv_expiry_delta`. Indeed, the length of those values is the reaction delay offered to a routing node
// in case of HTLC on-chain settlement. While appearing less competitive, a node operator could decide to
// scale them up to suit its security policy. At the network-level, we shouldn't constrain them too much,
// while avoiding introducing a DoS vector. Further, a low CLTV_FAR_FAR_AWAY could be a source of
// routing failure for any HTLC sender picking up an LDK node among the first hops.
1039 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 14 * 24 * 6;
1040
1041 /// Minimum CLTV difference between the current block height and the expiry of received inbound
1042 /// payments. Invoices generated for payment to us must set their `min_final_cltv_expiry_delta`
1043 /// field to at least this value.
1044 // Note that we fail if exactly HTLC_FAIL_BACK_BUFFER + 1 was used, so we need to add one for
1045 // any payments to succeed. Further, we don't want payments to fail if a block was found while
1046 // a payment was being routed, so we add an extra block to be safe.
1047 pub const MIN_FINAL_CLTV_EXPIRY_DELTA: u16 = HTLC_FAIL_BACK_BUFFER as u16 + 3;
1048
1049 // Check that our MIN_CLTV_EXPIRY_DELTA is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
1050 // ie that if the next-hop peer fails the HTLC within
1051 // LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to time it out on-chain,
1052 // then wait ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC and
1053 // fail the corresponding HTLC backward, while still seeing the last block of ANTI_REORG_DELAY before
1054 // LATENCY_GRACE_PERIOD_BLOCKS runs out.
1055 #[deny(const_err)]
1056 #[allow(dead_code)]
1057 const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
1058
1059 // Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
1060 // ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
1061 #[deny(const_err)]
1062 #[allow(dead_code)]
1063 const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
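// Both sanity checks above work by underflow: if the required inequality doesn't hold, the
// `u32` subtraction underflows at compile time and the build fails under `#[deny(const_err)]`.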
1064
1065 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete multi-path payments (MPPs).
1066 pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
1067
1068 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] until we time out the
1069 /// idempotency guarantee of payments, keyed by [`PaymentId`]. See
1070 /// [`OutboundPayments::remove_stale_resolved_payments`].
1071 pub(crate) const IDEMPOTENCY_TIMEOUT_TICKS: u8 = 7;
1072
1073 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is disconnected
1074 /// until we mark the channel disabled and gossip the update.
1075 pub(crate) const DISABLE_GOSSIP_TICKS: u8 = 10;
1076
1077 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] where a peer is connected until
1078 /// we mark the channel enabled and gossip the update.
1079 pub(crate) const ENABLE_GOSSIP_TICKS: u8 = 5;
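// `timer_tick_occurred` is expected to be called roughly once per minute, so with the defaults
// above a channel is gossiped as disabled after about ten minutes of disconnection and
// re-enabled after about five minutes of being reconnected.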
1080
1081 /// The maximum number of unfunded channels we can have per-peer before we start rejecting new
1082 /// (inbound) ones. The number of peers with unfunded channels is limited separately in
1083 /// [`MAX_UNFUNDED_CHANNEL_PEERS`].
1084 const MAX_UNFUNDED_CHANS_PER_PEER: usize = 4;
1085
1086 /// The maximum number of peers from which we will allow pending unfunded channels. Once we reach
1087 /// this many peers we reject new (inbound) channels from peers with which we don't have a channel.
1088 const MAX_UNFUNDED_CHANNEL_PEERS: usize = 50;
1089
1090 /// The maximum number of peers which we do not have a (funded) channel with. Once we reach this
1091 /// many peers we reject new (inbound) connections.
1092 const MAX_NO_CHANNEL_PEERS: usize = 250;
1093
1094 /// Information needed for constructing an invoice route hint for this channel.
1095 #[derive(Clone, Debug, PartialEq)]
1096 pub struct CounterpartyForwardingInfo {
1097         /// Base routing fee in millisatoshis.
1098         pub fee_base_msat: u32,
1099         /// Amount in millionths of a satoshi the channel will charge per transferred satoshi.
1100         pub fee_proportional_millionths: u32,
1101         /// The minimum difference in cltv_expiry between an incoming HTLC and its outgoing counterpart,
1102         /// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s
1103         /// `cltv_expiry_delta` for more details.
1104         pub cltv_expiry_delta: u16,
1105 }
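// A hedged sketch of how these fields conventionally combine into a forwarding fee per BOLT 7;
// this helper is illustrative only and not part of this file's API.
#[cfg(test)]
fn _estimate_forwarding_fee_msat(info: &CounterpartyForwardingInfo, forwarded_amt_msat: u64) -> u64 {
	// fee = base + amount * proportional / 1_000_000; widen to u128 to avoid overflow on
	// large amounts before dividing back down.
	info.fee_base_msat as u64 +
		(forwarded_amt_msat as u128 * info.fee_proportional_millionths as u128 / 1_000_000) as u64
}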
1106
1107 /// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`]
1108 /// to better separate our counterparty's parameters from our own.
1109 #[derive(Clone, Debug, PartialEq)]
1110 pub struct ChannelCounterparty {
1111         /// The node_id of our counterparty
1112         pub node_id: PublicKey,
1113         /// The Features the channel counterparty provided upon last connection.
1114         /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
1115         /// many routing-relevant features are present in the init context.
1116         pub features: InitFeatures,
1117         /// The value, in satoshis, that must always be held in the channel for our counterparty. This
1118         /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by
1119         /// claiming at least this value on chain.
1120         ///
1121         /// This value is not included in [`inbound_capacity_msat`] as it can never be spent.
1122         ///
1123         /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat
1124         pub unspendable_punishment_reserve: u64,
1125         /// Information on the fees and requirements that the counterparty requires when forwarding
1126         /// payments to us through this channel.
1127         pub forwarding_info: Option<CounterpartyForwardingInfo>,
1128         /// The smallest value HTLC (in msat) the remote peer will accept, for this channel. This field
1129         /// is only `None` before we have received either the `OpenChannel` or `AcceptChannel` message
1130         /// from the remote peer, or for `ChannelCounterparty` objects serialized prior to LDK 0.0.107.
1131         pub outbound_htlc_minimum_msat: Option<u64>,
1132         /// The largest value HTLC (in msat) the remote peer currently will accept, for this channel.
1133         pub outbound_htlc_maximum_msat: Option<u64>,
1134 }
1135
1136 /// Details of a channel, as returned by [`ChannelManager::list_channels`] and [`ChannelManager::list_usable_channels`]
1137 #[derive(Clone, Debug, PartialEq)]
1138 pub struct ChannelDetails {
1139         /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
1140         /// thereafter this is the txid of the funding transaction XORed with the funding output index).
1141         /// Note that this means this value is *not* persistent - it can change once during the
1142         /// lifetime of the channel.
1143         pub channel_id: [u8; 32],
1144         /// Parameters which apply to our counterparty. See individual fields for more information.
1145         pub counterparty: ChannelCounterparty,
1146         /// The Channel's funding transaction output, if we've negotiated the funding transaction with
1147         /// our counterparty already.
1148         ///
1149         /// Note that, if this has been set, `channel_id` will be equivalent to
1150         /// `funding_txo.unwrap().to_channel_id()`.
1151         pub funding_txo: Option<OutPoint>,
1152         /// The features which this channel operates with. See individual features for more info.
1153         ///
1154         /// `None` until negotiation completes and the channel type is finalized.
1155         pub channel_type: Option<ChannelTypeFeatures>,
1156         /// The position of the funding transaction in the chain. None if the funding transaction has
1157         /// not yet been confirmed and the channel fully opened.
1158         ///
1159         /// Note that if [`inbound_scid_alias`] is set, it must be used for invoices and inbound
1160         /// payments instead of this. See [`get_inbound_payment_scid`].
1161         ///
1162         /// For channels with [`confirmations_required`] set to `Some(0)`, [`outbound_scid_alias`] may
1163         /// be used in place of this in outbound routes. See [`get_outbound_payment_scid`].
1164         ///
1165         /// [`inbound_scid_alias`]: Self::inbound_scid_alias
1166         /// [`outbound_scid_alias`]: Self::outbound_scid_alias
1167         /// [`get_inbound_payment_scid`]: Self::get_inbound_payment_scid
1168         /// [`get_outbound_payment_scid`]: Self::get_outbound_payment_scid
1169         /// [`confirmations_required`]: Self::confirmations_required
1170         pub short_channel_id: Option<u64>,
1171         /// An optional [`short_channel_id`] alias for this channel, randomly generated by us and
1172         /// usable in place of [`short_channel_id`] to reference the channel in outbound routes when
1173         /// the channel has not yet been confirmed (as long as [`confirmations_required`] is
1174         /// `Some(0)`).
1175         ///
1176         /// This will be `None` as long as the channel is not available for routing outbound payments.
1177         ///
1178         /// [`short_channel_id`]: Self::short_channel_id
1179         /// [`confirmations_required`]: Self::confirmations_required
1180         pub outbound_scid_alias: Option<u64>,
1181         /// An optional [`short_channel_id`] alias for this channel, randomly generated by our
1182         /// counterparty and usable in place of [`short_channel_id`] in invoice route hints. Our
1183         /// counterparty will recognize the alias provided here in place of the [`short_channel_id`]
1184         /// when they see a payment to be routed to us.
1185         ///
1186         /// Our counterparty may choose to rotate this value at any time, though will always recognize
1187         /// previous values for inbound payment forwarding.
1188         ///
1189         /// [`short_channel_id`]: Self::short_channel_id
1190         pub inbound_scid_alias: Option<u64>,
1191         /// The value, in satoshis, of this channel as appears in the funding output
1192         pub channel_value_satoshis: u64,
1193         /// The value, in satoshis, that must always be held in the channel for us. This value ensures
1194         /// that if we broadcast a revoked state, our counterparty can punish us by claiming at least
1195         /// this value on chain.
1196         ///
1197         /// This value is not included in [`outbound_capacity_msat`] as it can never be spent.
1198         ///
1199         /// This value will be `None` for outbound channels until the counterparty accepts the channel.
1200         ///
1201         /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
1202         pub unspendable_punishment_reserve: Option<u64>,
1203         /// The `user_channel_id` passed in to create_channel, or a random value if the channel was
1204         /// inbound. This may be zero for inbound channels serialized with LDK versions prior to
1205         /// 0.0.113.
1206         pub user_channel_id: u128,
1207         /// The currently negotiated fee rate denominated in satoshi per 1000 weight units,
1208         /// which is applied to commitment and HTLC transactions.
1209         ///
1210         /// This value will be `None` for objects serialized with LDK versions prior to 0.0.115.
1211         pub feerate_sat_per_1000_weight: Option<u32>,
1212         /// Our total balance. This is the amount we would get if we closed the channel.
1213         /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
1214         /// amount is not likely to be recoverable on close.
1215         ///
1216         /// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose
1217         /// balance is not available for inclusion in new outbound HTLCs). This further does not include
1218         /// any pending outgoing HTLCs which are awaiting some other resolution to be sent.
1219         /// This does not consider any on-chain fees.
1220         ///
1221         /// See also [`ChannelDetails::outbound_capacity_msat`]
1222         pub balance_msat: u64,
1223         /// The available outbound capacity for sending HTLCs to the remote peer. This does not include
1224         /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
1225         /// available for inclusion in new outbound HTLCs). This further does not include any pending
1226         /// outgoing HTLCs which are awaiting some other resolution to be sent.
1227         ///
1228         /// See also [`ChannelDetails::balance_msat`]
1229         ///
1230         /// This value is not exact. Due to various in-flight changes, feerate changes, and our
1231         /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
1232         /// should be able to spend nearly this amount.
1233         pub outbound_capacity_msat: u64,
1234         /// The available outbound capacity for sending a single HTLC to the remote peer. This is
1235         /// similar to [`ChannelDetails::outbound_capacity_msat`] but it may be further restricted by
1236         /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
1237         /// to use a limit as close as possible to the HTLC limit we can currently send.
1238         ///
1239         /// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
1240         pub next_outbound_htlc_limit_msat: u64,
1241         /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
1242         /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
1243         /// available for inclusion in new inbound HTLCs).
1244         /// Note that there are some corner cases not fully handled here, so the actual available
1245         /// inbound capacity may be slightly higher than this.
1246         ///
1247         /// This value is not exact. Due to various in-flight changes, feerate changes, and our
1248         /// counterparty's conflict-avoidance policy, exactly this amount is not likely to be spendable.
1249         /// However, our counterparty should be able to spend nearly this amount.
1250         pub inbound_capacity_msat: u64,
1251         /// The number of required confirmations on the funding transaction before the funding will be
1252         /// considered "locked". This number is selected by the channel fundee (i.e. us if
1253         /// [`is_outbound`] is *not* set), and can be selected for inbound channels with
1254         /// [`ChannelHandshakeConfig::minimum_depth`] or limited for outbound channels with
1255         /// [`ChannelHandshakeLimits::max_minimum_depth`].
1256         ///
1257         /// This value will be `None` for outbound channels until the counterparty accepts the channel.
1258         ///
1259         /// [`is_outbound`]: ChannelDetails::is_outbound
1260         /// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth
1261         /// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth
1262         pub confirmations_required: Option<u32>,
1263         /// The current number of confirmations on the funding transaction.
1264         ///
1265         /// This value will be `None` for objects serialized with LDK versions prior to 0.0.113.
1266         pub confirmations: Option<u32>,
1267         /// The number of blocks (after our commitment transaction confirms) that we will need to wait
1268         /// until we can claim our funds after we force-close the channel. During this time our
1269         /// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty
1270         /// force-closes the channel and broadcasts a commitment transaction we do not have to wait any
1271         /// time to claim our non-HTLC-encumbered funds.
1272         ///
1273         /// This value will be `None` for outbound channels until the counterparty accepts the channel.
1274         pub force_close_spend_delay: Option<u16>,
1275         /// True if the channel was initiated (and thus funded) by us.
1276         pub is_outbound: bool,
1277         /// True if the channel is confirmed, channel_ready messages have been exchanged, and the
1278         /// channel is not currently being shut down. `channel_ready` message exchange implies the
1279         /// required confirmation count has been reached (and we were connected to the peer at some
1280         /// point after the funding transaction received enough confirmations). The required
1281         /// confirmation count is provided in [`confirmations_required`].
1282         ///
1283         /// [`confirmations_required`]: ChannelDetails::confirmations_required
1284         pub is_channel_ready: bool,
1285         /// True if the channel is (a) confirmed and channel_ready messages have been exchanged, (b)
1286         /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
1287         ///
1288         /// The conditions for this are a strict superset of those for `is_channel_ready`.
1289         pub is_usable: bool,
1290         /// True if this channel is (or will be) publicly-announced.
1291         pub is_public: bool,
1292         /// The smallest value HTLC (in msat) we will accept, for this channel. This field
1293         /// is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.107
1294         pub inbound_htlc_minimum_msat: Option<u64>,
1295         /// The largest value HTLC (in msat) we currently will accept, for this channel.
1296         pub inbound_htlc_maximum_msat: Option<u64>,
1297         /// Set of configurable parameters that affect channel operation.
1298         ///
1299         /// This field is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.109.
1300         pub config: Option<ChannelConfig>,
1301 }
1302
1303 impl ChannelDetails {
1304         /// Gets the current SCID which should be used to identify this channel for inbound payments.
1305         /// This should be used for providing invoice hints or in any other context where our
1306         /// counterparty will forward a payment to us.
1307         ///
1308         /// This is either the [`ChannelDetails::inbound_scid_alias`], if set, or the
1309         /// [`ChannelDetails::short_channel_id`]. See those for more information.
1310         pub fn get_inbound_payment_scid(&self) -> Option<u64> {
1311                 self.inbound_scid_alias.or(self.short_channel_id)
1312         }
1313
1314         /// Gets the current SCID which should be used to identify this channel for outbound payments.
1315         /// This should be used in [`Route`]s to describe the first hop or in other contexts where
1316         /// we're sending or forwarding a payment outbound over this channel.
1317         ///
1318         /// This is either the [`ChannelDetails::short_channel_id`], if set, or the
1319         /// [`ChannelDetails::outbound_scid_alias`]. See those for more information.
1320         pub fn get_outbound_payment_scid(&self) -> Option<u64> {
1321                 self.short_channel_id.or(self.outbound_scid_alias)
1322         }
1323
1324         fn from_channel<Signer: WriteableEcdsaChannelSigner>(channel: &Channel<Signer>,
1325                 best_block_height: u32, latest_features: InitFeatures) -> Self {
1326
1327                 let balance = channel.get_available_balances();
1328                 let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
1329                         channel.get_holder_counterparty_selected_channel_reserve_satoshis();
1330                 ChannelDetails {
1331                         channel_id: channel.channel_id(),
1332                         counterparty: ChannelCounterparty {
1333                                 node_id: channel.get_counterparty_node_id(),
1334                                 features: latest_features,
1335                                 unspendable_punishment_reserve: to_remote_reserve_satoshis,
1336                                 forwarding_info: channel.counterparty_forwarding_info(),
1337                                 // Ensures that we have actually received the `htlc_minimum_msat` value
1338                                 // from the counterparty through the `OpenChannel` or `AcceptChannel`
1339                                 // message (as they are always the first message from the counterparty).
1340                                 // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
1341                                 // default `0` value set by `Channel::new_outbound`.
1342                                 outbound_htlc_minimum_msat: if channel.have_received_message() {
1343                                         Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
1344                                 outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
1345                         },
1346                         funding_txo: channel.get_funding_txo(),
1347                         // Note that accept_channel (or open_channel) is always the first message, so
1348                         // `have_received_message` indicates that type negotiation has completed.
1349                         channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
1350                         short_channel_id: channel.get_short_channel_id(),
1351                         outbound_scid_alias: if channel.is_usable() { Some(channel.outbound_scid_alias()) } else { None },
1352                         inbound_scid_alias: channel.latest_inbound_scid_alias(),
1353                         channel_value_satoshis: channel.get_value_satoshis(),
1354                         feerate_sat_per_1000_weight: Some(channel.get_feerate_sat_per_1000_weight()),
1355                         unspendable_punishment_reserve: to_self_reserve_satoshis,
1356                         balance_msat: balance.balance_msat,
1357                         inbound_capacity_msat: balance.inbound_capacity_msat,
1358                         outbound_capacity_msat: balance.outbound_capacity_msat,
1359                         next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
1360                         user_channel_id: channel.get_user_id(),
1361                         confirmations_required: channel.minimum_depth(),
1362                         confirmations: Some(channel.get_funding_tx_confirmations(best_block_height)),
1363                         force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
1364                         is_outbound: channel.is_outbound(),
1365                         is_channel_ready: channel.is_usable(),
1366                         is_usable: channel.is_live(),
1367                         is_public: channel.should_announce(),
1368                         inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
1369                         inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat(),
1370                         config: Some(channel.config()),
1371                 }
1372         }
1373 }
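// A minimal sketch (hypothetical helper, not part of the real API): selecting the SCID to
// advertise in an invoice route hint for each usable channel, using the getters above.
#[cfg(test)]
fn _inbound_hint_scids(channels: &[ChannelDetails]) -> Vec<(PublicKey, u64)> {
	channels.iter()
		// Only channels the counterparty can currently forward over are useful in hints.
		.filter(|chan| chan.is_usable)
		// Prefer the counterparty-provided alias, falling back to the real SCID.
		.filter_map(|chan| chan.get_inbound_payment_scid()
			.map(|scid| (chan.counterparty.node_id, scid)))
		.collect()
}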
1374
1375 /// Used by [`ChannelManager::list_recent_payments`] to express the status of recent payments.
1376 /// These include payments that have yet to find a successful path, or have unresolved HTLCs.
1377 #[derive(Debug, PartialEq)]
1378 pub enum RecentPaymentDetails {
1379         /// When a payment is still being sent and awaiting successful delivery.
1380         Pending {
1381                 /// Hash of the payment that is currently being sent but has yet to be fulfilled or
1382                 /// abandoned.
1383                 payment_hash: PaymentHash,
1384                 /// Total amount (in msat, excluding fees) across all paths for this payment,
1385                 /// not just the amount currently inflight.
1386                 /// not just the amount currently in flight.
1387         },
1388         /// When a pending payment is fulfilled, we continue tracking it until all pending HTLCs have
1389         /// been resolved. Upon receiving [`Event::PaymentSent`], we delay for a few minutes before the
1390         /// payment is removed from tracking.
1391         Fulfilled {
1392                 /// Hash of the payment that was claimed. `None` for serializations of [`ChannelManager`]
1393                 /// made before LDK version 0.0.104.
1394                 payment_hash: Option<PaymentHash>,
1395         },
1396         /// After a payment's retries are exhausted per the provided [`Retry`], or it is explicitly
1397         /// abandoned via [`ChannelManager::abandon_payment`], it is marked as abandoned until all
1398         /// pending HTLCs for this payment resolve and an [`Event::PaymentFailed`] is generated.
1399         Abandoned {
1400                 /// Hash of the payment that we have given up trying to send.
1401                 payment_hash: PaymentHash,
1402         },
1403 }
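// Illustrative only (not part of the real API): collapsing `RecentPaymentDetails` into a
// human-readable status, covering the three states a tracked payment can be in.
#[cfg(test)]
fn _recent_payment_status(details: &RecentPaymentDetails) -> &'static str {
	match details {
		RecentPaymentDetails::Pending { .. } => "being sent, awaiting delivery",
		RecentPaymentDetails::Fulfilled { .. } => "claimed, awaiting HTLC resolution",
		RecentPaymentDetails::Abandoned { .. } => "abandoned, awaiting HTLC resolution",
	}
}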
1404
1405 /// Route hints used in constructing invoices for [phantom node payments].
1406 ///
1407 /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
1408 #[derive(Clone)]
1409 pub struct PhantomRouteHints {
1410         /// The list of channels to be included in the invoice route hints.
1411         pub channels: Vec<ChannelDetails>,
1412         /// A fake scid used for representing the phantom node's fake channel in generating the invoice
1413         /// route hints.
1414         pub phantom_scid: u64,
1415         /// The pubkey of the real backing node that would ultimately receive the payment.
1416         pub real_node_pubkey: PublicKey,
1417 }
1418
1419 macro_rules! handle_error {
1420         ($self: ident, $internal: expr, $counterparty_node_id: expr) => { {
1421                 // In testing, ensure there are no deadlocks where the lock is already held upon
1422                 // entering the macro.
1423                 debug_assert_ne!($self.pending_events.held_by_thread(), LockHeldState::HeldByThread);
1424                 debug_assert_ne!($self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
1425
1426                 match $internal {
1427                         Ok(msg) => Ok(msg),
1428                         Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
1429                                 let mut msg_events = Vec::with_capacity(2);
1430
1431                                 if let Some((shutdown_res, update_option)) = shutdown_finish {
1432                                         $self.finish_force_close_channel(shutdown_res);
1433                                         if let Some(update) = update_option {
1434                                                 msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
1435                                                         msg: update
1436                                                 });
1437                                         }
1438                                         if let Some((channel_id, user_channel_id)) = chan_id {
1439                                                 $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed {
1440                                                         channel_id, user_channel_id,
1441                                                         reason: ClosureReason::ProcessingError { err: err.err.clone() }
1442                                                 });
1443                                         }
1444                                 }
1445
1446                                 log_error!($self.logger, "{}", err.err);
1447                                 if let msgs::ErrorAction::IgnoreError = err.action {
1448                                 } else {
1449                                         msg_events.push(events::MessageSendEvent::HandleError {
1450                                                 node_id: $counterparty_node_id,
1451                                                 action: err.action.clone()
1452                                         });
1453                                 }
1454
1455                                 if !msg_events.is_empty() {
1456                                         let per_peer_state = $self.per_peer_state.read().unwrap();
1457                                         if let Some(peer_state_mutex) = per_peer_state.get(&$counterparty_node_id) {
1458                                                 let mut peer_state = peer_state_mutex.lock().unwrap();
1459                                                 peer_state.pending_msg_events.append(&mut msg_events);
1460                                         }
1461                                 }
1462
1463                                 // Return the error in case a higher-level API needs one
1464                                 Err(err)
1465                         },
1466                 }
1467         } }
1468 }
1469
1470 macro_rules! update_maps_on_chan_removal {
1471         ($self: expr, $channel: expr) => {{
1472                 $self.id_to_peer.lock().unwrap().remove(&$channel.channel_id());
1473                 let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
1474                 if let Some(short_id) = $channel.get_short_channel_id() {
1475                         short_to_chan_info.remove(&short_id);
1476                 } else {
1477                         // If the channel was never confirmed on-chain prior to its closure, remove the
1478                         // outbound SCID alias we used for it from the collision-prevention set. While we
1479                         // generally want to avoid ever re-using an outbound SCID alias across all channels, we
1480                         // also don't want a counterparty to be able to trivially cause a memory leak by simply
1481                         // opening a million channels with us which are closed before we ever reach the funding
1482                         // stage.
1483                         let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
1484                         debug_assert!(alias_removed);
1485                 }
1486                 short_to_chan_info.remove(&$channel.outbound_scid_alias());
1487         }}
1488 }
1489
1490 /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
1491 macro_rules! convert_chan_err {
1492         ($self: ident, $err: expr, $channel: expr, $channel_id: expr) => {
1493                 match $err {
1494                         ChannelError::Warn(msg) => {
1495                                 (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone()))
1496                         },
1497                         ChannelError::Ignore(msg) => {
1498                                 (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
1499                         },
1500                         ChannelError::Close(msg) => {
1501                                 log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
1502                                 update_maps_on_chan_removal!($self, $channel);
1503                                 let shutdown_res = $channel.force_shutdown(true);
1504                                 (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
1505                                         shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
1506                         },
1507                 }
1508         }
1509 }
1510
1511 macro_rules! break_chan_entry {
1512         ($self: ident, $res: expr, $entry: expr) => {
1513                 match $res {
1514                         Ok(res) => res,
1515                         Err(e) => {
1516                                 let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
1517                                 if drop {
1518                                         $entry.remove_entry();
1519                                 }
1520                                 break Err(res);
1521                         }
1522                 }
1523         }
1524 }
1525
1526 macro_rules! try_chan_entry {
1527         ($self: ident, $res: expr, $entry: expr) => {
1528                 match $res {
1529                         Ok(res) => res,
1530                         Err(e) => {
1531                                 let (drop, res) = convert_chan_err!($self, e, $entry.get_mut(), $entry.key());
1532                                 if drop {
1533                                         $entry.remove_entry();
1534                                 }
1535                                 return Err(res);
1536                         }
1537                 }
1538         }
1539 }
1540
1541 macro_rules! remove_channel {
1542         ($self: expr, $entry: expr) => {
1543                 {
1544                         let channel = $entry.remove_entry().1;
1545                         update_maps_on_chan_removal!($self, channel);
1546                         channel
1547                 }
1548         }
1549 }
1550
1551 macro_rules! send_channel_ready {
1552         ($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
1553                 $pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
1554                         node_id: $channel.get_counterparty_node_id(),
1555                         msg: $channel_ready_msg,
1556                 });
1557                 // Note that we may send a `channel_ready` multiple times for a channel if we reconnect, so
1558                 // we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
1559                 let mut short_to_chan_info = $self.short_to_chan_info.write().unwrap();
1560                 let outbound_alias_insert = short_to_chan_info.insert($channel.outbound_scid_alias(), ($channel.get_counterparty_node_id(), $channel.channel_id()));
1561                 assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
1562                         "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
1563                 if let Some(real_scid) = $channel.get_short_channel_id() {
1564                         let scid_insert = short_to_chan_info.insert(real_scid, ($channel.get_counterparty_node_id(), $channel.channel_id()));
1565                         assert!(scid_insert.is_none() || scid_insert.unwrap() == ($channel.get_counterparty_node_id(), $channel.channel_id()),
1566                                 "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
1567                 }
1568         }}
1569 }
1570
1571 macro_rules! emit_channel_pending_event {
1572         ($locked_events: expr, $channel: expr) => {
1573                 if $channel.should_emit_channel_pending_event() {
1574                         $locked_events.push(events::Event::ChannelPending {
1575                                 channel_id: $channel.channel_id(),
1576                                 former_temporary_channel_id: $channel.temporary_channel_id(),
1577                                 counterparty_node_id: $channel.get_counterparty_node_id(),
1578                                 user_channel_id: $channel.get_user_id(),
1579                                 funding_txo: $channel.get_funding_txo().unwrap().into_bitcoin_outpoint(),
1580                         });
1581                         $channel.set_channel_pending_event_emitted();
1582                 }
1583         }
1584 }
1585
1586 macro_rules! emit_channel_ready_event {
1587         ($locked_events: expr, $channel: expr) => {
1588                 if $channel.should_emit_channel_ready_event() {
1589                         debug_assert!($channel.channel_pending_event_emitted());
1590                         $locked_events.push(events::Event::ChannelReady {
1591                                 channel_id: $channel.channel_id(),
1592                                 user_channel_id: $channel.get_user_id(),
1593                                 counterparty_node_id: $channel.get_counterparty_node_id(),
1594                                 channel_type: $channel.get_channel_type().clone(),
1595                         });
1596                         $channel.set_channel_ready_event_emitted();
1597                 }
1598         }
1599 }
1600
1601 macro_rules! handle_monitor_update_completion {
1602         ($self: ident, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { {
1603                 let mut updates = $chan.monitor_updating_restored(&$self.logger,
1604                         &$self.node_signer, $self.genesis_hash, &$self.default_configuration,
1605                         $self.best_block.read().unwrap().height());
1606                 let counterparty_node_id = $chan.get_counterparty_node_id();
1607                 let channel_update = if updates.channel_ready.is_some() && $chan.is_usable() {
1608                         // We only send a channel_update in the case where we are just now sending a
1609                         // channel_ready and the channel is in a usable state. We may re-send a
1610                         // channel_update later through the announcement_signatures process for public
1611                         // channels, but there's no reason not to just inform our counterparty of our fees
1612                         // now.
1613                         if let Ok(msg) = $self.get_channel_update_for_unicast($chan) {
1614                                 Some(events::MessageSendEvent::SendChannelUpdate {
1615                                         node_id: counterparty_node_id,
1616                                         msg,
1617                                 })
1618                         } else { None }
1619                 } else { None };
1620
1621                 let update_actions = $peer_state.monitor_update_blocked_actions
1622                         .remove(&$chan.channel_id()).unwrap_or(Vec::new());
1623
1624                 let htlc_forwards = $self.handle_channel_resumption(
1625                         &mut $peer_state.pending_msg_events, $chan, updates.raa,
1626                         updates.commitment_update, updates.order, updates.accepted_htlcs,
1627                         updates.funding_broadcastable, updates.channel_ready,
1628                         updates.announcement_sigs);
1629                 if let Some(upd) = channel_update {
1630                         $peer_state.pending_msg_events.push(upd);
1631                 }
1632
1633                 let channel_id = $chan.channel_id();
1634                 core::mem::drop($peer_state_lock);
1635                 core::mem::drop($per_peer_state_lock);
1636
1637                 $self.handle_monitor_update_completion_actions(update_actions);
1638
1639                 if let Some(forwards) = htlc_forwards {
1640                         $self.forward_htlcs(&mut [forwards][..]);
1641                 }
1642                 $self.finalize_claims(updates.finalized_claimed_htlcs);
1643                 for failure in updates.failed_htlcs.drain(..) {
1644                         let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
1645                         $self.fail_htlc_backwards_internal(&failure.0, &failure.1, &failure.2, receiver);
1646                 }
1647         } }
1648 }
1649
1650 macro_rules! handle_new_monitor_update {
1651         ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
1652                 // update_maps_on_chan_removal needs to be able to take the id_to_peer lock, so ensure the
1653                 // current thread isn't holding it in any case, so that this won't deadlock.
1654                 debug_assert_ne!($self.id_to_peer.held_by_thread(), LockHeldState::HeldByThread);
1655                 match $update_res {
1656                         ChannelMonitorUpdateStatus::InProgress => {
1657                                 log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
1658                                         log_bytes!($chan.channel_id()[..]));
1659                                 Ok(())
1660                         },
1661                         ChannelMonitorUpdateStatus::PermanentFailure => {
1662                                 log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure",
1663                                         log_bytes!($chan.channel_id()[..]));
1664                                 update_maps_on_chan_removal!($self, $chan);
1665                                 let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown(
1666                                         "ChannelMonitor storage failure".to_owned(), $chan.channel_id(),
1667                                         $chan.get_user_id(), $chan.force_shutdown(false),
1668                                         $self.get_channel_update_for_broadcast(&$chan).ok()));
1669                                 $remove;
1670                                 res
1671                         },
1672                         ChannelMonitorUpdateStatus::Completed => {
1673                                 if ($update_id == 0 || $chan.get_next_monitor_update()
1674                                         .expect("We can't be processing a monitor update if it isn't queued")
1675                                         .update_id == $update_id) &&
1676                                         $chan.get_latest_monitor_update_id() == $update_id
1677                                 {
1678                                         handle_monitor_update_completion!($self, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan);
1679                                 }
1680                                 Ok(())
1681                         },
1682                 }
1683         } };
1684         ($self: ident, $update_res: expr, $update_id: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan_entry: expr) => {
1685                 handle_new_monitor_update!($self, $update_res, $update_id, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan_entry.get_mut(), MANUALLY_REMOVING, $chan_entry.remove_entry())
1686         }
1687 }
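// For reference, the three `ChannelMonitorUpdateStatus` values handled above map to:
// - `Completed`: the update was persisted durably and channel operation continues immediately.
// - `InProgress`: persistence is asynchronous; outbound messages are held until it completes.
// - `PermanentFailure`: the monitor cannot be persisted and the channel must be force-closed.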
1688
1689 macro_rules! process_events_body {
1690         ($self: expr, $event_to_handle: expr, $handle_event: expr) => {
1691                 // We'll acquire our total consistency lock until the returned future completes so that
1692                 // we can be sure no other persists happen while processing events.
1693                 let _read_guard = $self.total_consistency_lock.read().unwrap();
1694
1695                 let mut result = NotifyOption::SkipPersist;
1696
1697                 // TODO: This behavior should be documented. It's unintuitive that we query
1698                 // ChannelMonitors when clearing other events.
1699                 if $self.process_pending_monitor_events() {
1700                         result = NotifyOption::DoPersist;
1701                 }
1702
1703                 let pending_events = mem::replace(&mut *$self.pending_events.lock().unwrap(), vec![]);
1704                 if !pending_events.is_empty() {
1705                         result = NotifyOption::DoPersist;
1706                 }
1707
1708                 for event in pending_events {
1709                         $event_to_handle = event;
1710                         $handle_event;
1711                 }
1712
1713                 if result == NotifyOption::DoPersist {
1714                         $self.persistence_notifier.notify();
1715                 }
1716         }
1717 }
1718
1719 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
1720 where
1721         M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
1722         T::Target: BroadcasterInterface,
1723         ES::Target: EntropySource,
1724         NS::Target: NodeSigner,
1725         SP::Target: SignerProvider,
1726         F::Target: FeeEstimator,
1727         R::Target: Router,
1728         L::Target: Logger,
1729 {
1730         /// Constructs a new `ChannelManager` to hold several channels and route between them.
1731         ///
1732         /// This is the main "logic hub" for all channel-related actions, and implements
1733         /// [`ChannelMessageHandler`].
1734         ///
1735         /// Non-proportional fees are fixed according to our risk using the provided fee estimator.
1736         ///
1737         /// Users need to notify the new `ChannelManager` when a new block is connected or
1738         /// disconnected using its [`block_connected`] and [`block_disconnected`] methods, starting
1739         /// from after [`params.best_block.block_hash`]. See [`chain::Listen`] and [`chain::Confirm`] for
1740         /// more details.
1741         ///
1742         /// [`block_connected`]: chain::Listen::block_connected
1743         /// [`block_disconnected`]: chain::Listen::block_disconnected
1744         /// [`params.best_block.block_hash`]: chain::BestBlock::block_hash
1745         pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, entropy_source: ES, node_signer: NS, signer_provider: SP, config: UserConfig, params: ChainParameters) -> Self {
1746                 let mut secp_ctx = Secp256k1::new();
1747                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1748                 let inbound_pmt_key_material = node_signer.get_inbound_payment_key_material();
1749                 let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
1750                 ChannelManager {
1751                         default_configuration: config.clone(),
1752                         genesis_hash: genesis_block(params.network).header.block_hash(),
1753                         fee_estimator: LowerBoundedFeeEstimator::new(fee_est),
1754                         chain_monitor,
1755                         tx_broadcaster,
1756                         router,
1757
1758                         best_block: RwLock::new(params.best_block),
1759
1760                         outbound_scid_aliases: Mutex::new(HashSet::new()),
1761                         pending_inbound_payments: Mutex::new(HashMap::new()),
1762                         pending_outbound_payments: OutboundPayments::new(),
1763                         forward_htlcs: Mutex::new(HashMap::new()),
1764                         claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs: HashMap::new(), pending_claiming_payments: HashMap::new() }),
1765                         pending_intercepted_htlcs: Mutex::new(HashMap::new()),
1766                         id_to_peer: Mutex::new(HashMap::new()),
1767                         short_to_chan_info: FairRwLock::new(HashMap::new()),
1768
1769                         our_network_pubkey: node_signer.get_node_id(Recipient::Node).unwrap(),
1770                         secp_ctx,
1771
1772                         inbound_payment_key: expanded_inbound_key,
1773                         fake_scid_rand_bytes: entropy_source.get_secure_random_bytes(),
1774
1775                         probing_cookie_secret: entropy_source.get_secure_random_bytes(),
1776
1777                         highest_seen_timestamp: AtomicUsize::new(0),
1778
1779                         per_peer_state: FairRwLock::new(HashMap::new()),
1780
1781                         pending_events: Mutex::new(Vec::new()),
1782                         pending_background_events: Mutex::new(Vec::new()),
1783                         total_consistency_lock: RwLock::new(()),
1784                         persistence_notifier: Notifier::new(),
1785
1786                         entropy_source,
1787                         node_signer,
1788                         signer_provider,
1789
1790                         logger,
1791                 }
1792         }
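        // Hedged construction sketch. The `fee_estimator`, `chain_monitor`, `keys_manager`, etc.
        // bindings below are assumptions the caller supplies, not APIs defined in this file; a
        // `KeysManager` can serve as the entropy source, node signer, and signer provider:
        //
        //   let params = ChainParameters { network: Network::Bitcoin, best_block };
        //   let channel_manager = ChannelManager::new(fee_estimator, chain_monitor, tx_broadcaster,
        //           router, logger, keys_manager.clone(), keys_manager.clone(), keys_manager,
        //           UserConfig::default(), params);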
1793
1794         /// Gets the current configuration applied to all new channels.
1795         pub fn get_current_default_configuration(&self) -> &UserConfig {
1796                 &self.default_configuration
1797         }
1798
1799         fn create_and_insert_outbound_scid_alias(&self) -> u64 {
1800                 let height = self.best_block.read().unwrap().height();
1801                 let mut outbound_scid_alias = 0;
1802                 let mut i = 0;
1803                 loop {
1804                         if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias
1805                                 outbound_scid_alias += 1;
1806                         } else {
1807                                 outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
1808                         }
1809                         if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
1810                                 break;
1811                         }
1812                         i += 1;
1813                         if i > 1_000_000 { panic!("Your RNG is busted or we ran out of possible outbound SCID aliases (which should never happen before we run out of memory to store channels)"); }
1814                 }
1815                 outbound_scid_alias
1816         }
1817
1818         /// Creates a new outbound channel to the given remote node and with the given value.
1819         ///
1820         /// `user_channel_id` will be provided back as in
1821         /// [`Event::FundingGenerationReady::user_channel_id`] to allow tracking of which events
1822         /// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to a
1823         /// randomized value for inbound channels. `user_channel_id` has no meaning inside of LDK, it
1824         /// is simply copied to events and otherwise ignored.
1825         ///
1826         /// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is
1827         /// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`.
1828         ///
1829         /// Note that we do not check if you are currently connected to the given peer. If no
1830         /// connection is available, the outbound `open_channel` message may fail to send, resulting in
1831         /// the channel eventually being silently forgotten (dropped on reload).
1832         ///
1833         /// Returns the new Channel's temporary `channel_id`. This ID will appear as
1834         /// [`Event::FundingGenerationReady::temporary_channel_id`] and in
1835         /// [`ChannelDetails::channel_id`] until after
1836         /// [`ChannelManager::funding_transaction_generated`] is called, swapping the Channel's ID for
1837         /// one derived from the funding transaction's TXID. If the counterparty rejects the channel
1838         /// immediately, this temporary ID will appear in [`Event::ChannelClosed::channel_id`].
1839         ///
1840         /// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id
1841         /// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id
1842         /// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id
1843         pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u128, override_config: Option<UserConfig>) -> Result<[u8; 32], APIError> {
1844                 if channel_value_satoshis < 1000 {
1845                         return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
1846                 }
1847
1848                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
1849                 // We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
1850                 debug_assert!(&self.total_consistency_lock.try_write().is_err());
1851
1852                 let per_peer_state = self.per_peer_state.read().unwrap();
1853
1854                 let peer_state_mutex = per_peer_state.get(&their_network_key)
1855                         .ok_or_else(|| APIError::APIMisuseError{ err: format!("Not connected to node: {}", their_network_key) })?;
1856
1857                 let mut peer_state = peer_state_mutex.lock().unwrap();
1858                 let channel = {
1859                         let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
1860                         let their_features = &peer_state.latest_features;
1861                         let config = override_config.as_ref().unwrap_or(&self.default_configuration);
1862                         match Channel::new_outbound(&self.fee_estimator, &self.entropy_source, &self.signer_provider, their_network_key,
1863                                 their_features, channel_value_satoshis, push_msat, user_channel_id, config,
1864                                 self.best_block.read().unwrap().height(), outbound_scid_alias)
1865                         {
1866                                 Ok(res) => res,
1867                                 Err(e) => {
1868                                         self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
1869                                         return Err(e);
1870                                 },
1871                         }
1872                 };
1873                 let res = channel.get_open_channel(self.genesis_hash.clone());
1874
1875                 let temporary_channel_id = channel.channel_id();
1876                 match peer_state.channel_by_id.entry(temporary_channel_id) {
1877                         hash_map::Entry::Occupied(_) => {
1878                                 if cfg!(fuzzing) {
1879                                         return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
1880                                 } else {
1881                                         panic!("RNG is bad???");
1882                                 }
1883                         },
1884                         hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
1885                 }
1886
1887                 peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
1888                         node_id: their_network_key,
1889                         msg: res,
1890                 });
1891                 Ok(temporary_channel_id)
1892         }
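        // Hedged usage sketch (`channel_manager` and `peer_node_id` are caller-supplied
        // assumptions): open a 100k-sat channel with no initial push and a caller-chosen id.
        //
        //   let temporary_channel_id =
        //           channel_manager.create_channel(peer_node_id, 100_000, 0, 42, None)?;
        //   // Then wait for Event::FundingGenerationReady before providing the funding tx.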
1893
1894         fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<<SP::Target as SignerProvider>::Signer>)) -> bool + Copy>(&self, f: Fn) -> Vec<ChannelDetails> {
1895                 // Allocate our best estimate of the number of channels we have in the `res`
1896                 // Vec. Sadly the `short_to_chan_info` map doesn't cover channels without
1897                 // a scid or a scid alias, and the `id_to_peer` shouldn't be used outside
1898                 // of the ChannelMonitor handling. Therefore reallocations may still occur, but they are
1899                 // unlikely as the `short_to_chan_info` map often contains 2 entries for
1900                 // the same channel.
1901                 let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
1902                 {
1903                         let best_block_height = self.best_block.read().unwrap().height();
1904                         let per_peer_state = self.per_peer_state.read().unwrap();
1905                         for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
1906                                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
1907                                 let peer_state = &mut *peer_state_lock;
1908                                 for (_channel_id, channel) in peer_state.channel_by_id.iter().filter(f) {
1909                                         let details = ChannelDetails::from_channel(channel, best_block_height,
1910                                                 peer_state.latest_features.clone());
1911                                         res.push(details);
1912                                 }
1913                         }
1914                 }
1915                 res
1916         }
1917
1918         /// Gets the list of open channels, in random order. See [`ChannelDetails`] field documentation for
1919         /// more information.
1920         pub fn list_channels(&self) -> Vec<ChannelDetails> {
1921                 self.list_channels_with_filter(|_| true)
1922         }
1923
1924         /// Gets the list of usable channels, in random order. Useful as an argument to
1925         /// [`Router::find_route`] to ensure non-announced channels can be used.
1926         ///
1927         /// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the
1928         /// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria
1929         /// are.
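             ///
             /// A minimal sketch (not compiled here) of handing these to a router as the set of first
             /// hops; `channel_manager` is an illustrative assumption:
             ///
             /// ```ignore
             /// let usable = channel_manager.list_usable_channels();
             /// // Routers take first hops by reference, so collect a slice of references first.
             /// let first_hops: Vec<&ChannelDetails> = usable.iter().collect();
             /// // Pass `Some(&first_hops[..])` as the `first_hops` argument to `Router::find_route`.
             /// ```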
1930         pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
1931                 // Note that we use `is_live` here instead of `is_usable`, which leads to somewhat
1932                 // confused internal/external nomenclature, but that's okay because it's probably what
1933                 // the user really wanted anyway.
1934                 self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
1935         }
1936
1937         /// Gets the list of channels we have with a given counterparty, in random order.
1938         pub fn list_channels_with_counterparty(&self, counterparty_node_id: &PublicKey) -> Vec<ChannelDetails> {
1939                 let best_block_height = self.best_block.read().unwrap().height();
1940                 let per_peer_state = self.per_peer_state.read().unwrap();
1941
1942                 if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
1943                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
1944                         let peer_state = &mut *peer_state_lock;
1945                         let features = &peer_state.latest_features;
1946                         return peer_state.channel_by_id
1947                                 .iter()
1948                                 .map(|(_, channel)|
1949                                         ChannelDetails::from_channel(channel, best_block_height, features.clone()))
1950                                 .collect();
1951                 }
1952                 vec![]
1953         }
1954
1955         /// Returns, in an undefined order, recent payments that -- if not fulfilled -- have yet to
1956         /// find a successful path or still have unresolved HTLCs.
1957         ///
1958         /// This can be useful for payments that may have been prepared, but ultimately not sent, as a
1959         /// result of a crash. If such a payment exists, is not listed here, and an
1960         /// [`Event::PaymentSent`] has not been received, you may consider resending the payment.
1961         ///
1962         /// [`Event::PaymentSent`]: events::Event::PaymentSent
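             ///
             /// A minimal sketch (not compiled here) of the restart check described above;
             /// `channel_manager` and `our_payment_hash` are illustrative assumptions:
             ///
             /// ```ignore
             /// let still_known = channel_manager.list_recent_payments().iter().any(|p| matches!(
             ///     p, RecentPaymentDetails::Pending { payment_hash, .. } if *payment_hash == our_payment_hash
             /// ));
             /// // If `still_known` is false and no `Event::PaymentSent` was seen, the payment may
             /// // reasonably be retried.
             /// ```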
1963         pub fn list_recent_payments(&self) -> Vec<RecentPaymentDetails> {
1964                 self.pending_outbound_payments.pending_outbound_payments.lock().unwrap().iter()
1965                         .filter_map(|(_, pending_outbound_payment)| match pending_outbound_payment {
1966                                 PendingOutboundPayment::Retryable { payment_hash, total_msat, .. } => {
1967                                         Some(RecentPaymentDetails::Pending {
1968                                                 payment_hash: *payment_hash,
1969                                                 total_msat: *total_msat,
1970                                         })
1971                                 },
1972                                 PendingOutboundPayment::Abandoned { payment_hash, .. } => {
1973                                         Some(RecentPaymentDetails::Abandoned { payment_hash: *payment_hash })
1974                                 },
1975                                 PendingOutboundPayment::Fulfilled { payment_hash, .. } => {
1976                                         Some(RecentPaymentDetails::Fulfilled { payment_hash: *payment_hash })
1977                                 },
1978                                 PendingOutboundPayment::Legacy { .. } => None
1979                         })
1980                         .collect()
1981         }
1982
1983         /// Helper function that issues the channel close events
1984         fn issue_channel_close_events(&self, channel: &Channel<<SP::Target as SignerProvider>::Signer>, closure_reason: ClosureReason) {
1985                 let mut pending_events_lock = self.pending_events.lock().unwrap();
1986                 if let Some(transaction) = channel.unbroadcasted_funding() {
1987                         pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction })
1988                 }
1992                 pending_events_lock.push(events::Event::ChannelClosed {
1993                         channel_id: channel.channel_id(),
1994                         user_channel_id: channel.get_user_id(),
1995                         reason: closure_reason
1996                 });
1997         }
1998
1999         fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
2000                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2001
2002                 let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
2003                 let result: Result<(), _> = loop {
2004                         let per_peer_state = self.per_peer_state.read().unwrap();
2005
2006                         let peer_state_mutex = per_peer_state.get(counterparty_node_id)
2007                                 .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
2008
2009                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
2010                         let peer_state = &mut *peer_state_lock;
2011                         match peer_state.channel_by_id.entry(channel_id.clone()) {
2012                                 hash_map::Entry::Occupied(mut chan_entry) => {
2013                                         let funding_txo_opt = chan_entry.get().get_funding_txo();
2014                                         let their_features = &peer_state.latest_features;
2015                                         let (shutdown_msg, mut monitor_update_opt, htlcs) = chan_entry.get_mut()
2016                                                 .get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight)?;
2017                                         failed_htlcs = htlcs;
2018
2019                                         // We can send the `shutdown` message before updating the `ChannelMonitor`
2020                                         // here as we don't need the monitor update to complete until we send a
2021                                         // `closing_signed`, which we'll delay if we're pending a monitor update.
2022                                         peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
2023                                                 node_id: *counterparty_node_id,
2024                                                 msg: shutdown_msg,
2025                                         });
2026
2027                                         // Update the monitor with the shutdown script if necessary.
2028                                         if let Some(monitor_update) = monitor_update_opt.take() {
2029                                                 let update_id = monitor_update.update_id;
2030                                                 let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
2031                                                 break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
2032                                         }
2033
2034                                         if chan_entry.get().is_shutdown() {
2035                                                 let channel = remove_channel!(self, chan_entry);
2036                                                 if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
2037                                                         peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
2038                                                                 msg: channel_update
2039                                                         });
2040                                                 }
2041                                                 self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
2042                                         }
2043                                         break Ok(());
2044                                 },
2045                                 hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), counterparty_node_id) })
2046                         }
2047                 };
2048
2049                 for htlc_source in failed_htlcs.drain(..) {
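                              // 0x4000 | 8 is BOLT 4's PERM | 8, `permanent_channel_failure`: the channel is
                              // going away, so upstream hops should not retry through it.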
2050                         let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
2051                         let receiver = HTLCDestination::NextHopChannel { node_id: Some(*counterparty_node_id), channel_id: *channel_id };
2052                         self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
2053                 }
2054
2055                 let _ = handle_error!(self, result, *counterparty_node_id);
2056                 Ok(())
2057         }
2058
2059         /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
2060         /// will be accepted on the given channel, and after additional timeout/the closing of all
2061         /// pending HTLCs, the channel will be closed on chain.
2062         ///
2063         ///  * If we are the channel initiator, we will pay a closing-transaction fee between our
2064         ///    [`Background`] fee estimate and our [`Normal`] fee estimate plus
2065         ///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`].
2066         ///  * If our counterparty is the channel initiator, we will require a channel closing
2067         ///    transaction feerate of at least our [`Background`] feerate or the feerate which
2068         ///    would appear on a force-closure transaction, whichever is lower. We will allow our
2069         ///    counterparty to pay as much fee as they'd like, however.
2070         ///
2071         /// May generate a [`SendShutdown`] message event on success, which should be relayed.
2072         ///
2073         /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
2074         /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
2075         /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
2076         /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
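             ///
             /// A minimal usage sketch (not compiled here); `channel_manager`, `channel_id`, and
             /// `counterparty_node_id` are assumed to be in scope:
             ///
             /// ```ignore
             /// // Begin a cooperative close, letting LDK pick an acceptable closing feerate.
             /// channel_manager.close_channel(&channel_id, &counterparty_node_id)?;
             /// // The queued `SendShutdown` message event is delivered to the peer during normal
             /// // message-event processing (e.g. by a `PeerManager`).
             /// ```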
2077         pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
2078                 self.close_channel_internal(channel_id, counterparty_node_id, None)
2079         }
2080
2081         /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
2082         /// will be accepted on the given channel, and after additional timeout/the closing of all
2083         /// pending HTLCs, the channel will be closed on chain.
2084         ///
2085         /// `target_feerate_sats_per_1000_weight` has different meanings depending on whether we
2086         /// initiated the channel being closed or not:
2087         ///  * If we are the channel initiator, we will pay at least this feerate on the closing
2088         ///    transaction. The upper bound is set by
2089         ///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee
2090         ///    estimate (or `target_feerate_sats_per_1000_weight`, if it is greater).
2091         ///  * If our counterparty is the channel initiator, we will refuse to accept a channel closure
2092         ///    transaction feerate below `target_feerate_sats_per_1000_weight` (or the feerate which
2093         ///    will appear on a force-closure transaction, whichever is lower).
2094         ///
2095         /// May generate a [`SendShutdown`] message event on success, which should be relayed.
2096         ///
2097         /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
2098         /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
2099         /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
2100         /// [`SendShutdown`]: crate::events::MessageSendEvent::SendShutdown
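             ///
             /// A minimal usage sketch (not compiled here); the feerate value is illustrative only:
             ///
             /// ```ignore
             /// // Request at least 2500 sats per 1000 weight units on the closing transaction.
             /// channel_manager.close_channel_with_target_feerate(&channel_id, &counterparty_node_id, 2500)?;
             /// ```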
2101         pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
2102                 self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight))
2103         }
2104
2105         #[inline]
2106         fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
2107                 let (monitor_update_option, mut failed_htlcs) = shutdown_res;
2108                 log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
2109                 for htlc_source in failed_htlcs.drain(..) {
2110                         let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
2111                         let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
2112                         let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
2113                         self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
2114                 }
2115                 if let Some((funding_txo, monitor_update)) = monitor_update_option {
2116                         // There isn't anything we can do if we get an update failure - we're already
2117                         // force-closing. Applying the monitor update to the required in-memory copy
2118                         // should broadcast the latest local state, which is the best we can do anyway.
2119                         // Thus, it is safe to ignore the result here.
2120                         let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
2121                 }
2122         }
2123
2124         /// `peer_msg` should be set when we receive a message from a peer, but not set when the
2125         /// user closes the channel; it is re-exposed as part of the `ChannelClosed` reason.
2126         fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>, broadcast: bool)
2127         -> Result<PublicKey, APIError> {
2128                 let per_peer_state = self.per_peer_state.read().unwrap();
2129                 let peer_state_mutex = per_peer_state.get(peer_node_id)
2130                         .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", peer_node_id) })?;
2131                 let mut chan = {
2132                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
2133                         let peer_state = &mut *peer_state_lock;
2134                         if let hash_map::Entry::Occupied(chan) = peer_state.channel_by_id.entry(channel_id.clone()) {
2135                                 if let Some(peer_msg) = peer_msg {
2136                                         self.issue_channel_close_events(chan.get(), ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString(peer_msg.to_string()) });
2137                                 } else {
2138                                         self.issue_channel_close_events(chan.get(), ClosureReason::HolderForceClosed);
2139                                 }
2140                                 remove_channel!(self, chan)
2141                         } else {
2142                                 return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
2143                         }
2144                 };
2145                 log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
2146                 self.finish_force_close_channel(chan.force_shutdown(broadcast));
2147                 if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
2148                         let mut peer_state = peer_state_mutex.lock().unwrap();
2149                         peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
2150                                 msg: update
2151                         });
2152                 }
2153
2154                 Ok(chan.get_counterparty_node_id())
2155         }
2156
2157         fn force_close_sending_error(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, broadcast: bool) -> Result<(), APIError> {
2158                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2159                 match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None, broadcast) {
2160                         Ok(counterparty_node_id) => {
2161                                 let per_peer_state = self.per_peer_state.read().unwrap();
2162                                 if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
2163                                         let mut peer_state = peer_state_mutex.lock().unwrap();
2164                                         peer_state.pending_msg_events.push(
2165                                                 events::MessageSendEvent::HandleError {
2166                                                         node_id: counterparty_node_id,
2167                                                         action: msgs::ErrorAction::SendErrorMessage {
2168                                                                 msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
2169                                                         },
2170                                                 }
2171                                         );
2172                                 }
2173                                 Ok(())
2174                         },
2175                         Err(e) => Err(e)
2176                 }
2177         }
2178
2179         /// Force closes a channel, immediately broadcasting the latest local transaction(s) and
2180         /// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
2181         /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
2182         /// channel.
2183         pub fn force_close_broadcasting_latest_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
2184         -> Result<(), APIError> {
2185                 self.force_close_sending_error(channel_id, counterparty_node_id, true)
2186         }
2187
2188         /// Force closes a channel, rejecting new HTLCs on the given channel but skips broadcasting
2189         /// the latest local transaction(s). Fails if `channel_id` is unknown to the manager, or if the
2190         /// `counterparty_node_id` isn't the counterparty of the corresponding channel.
2191         ///
2192         /// You can always get the latest local transaction(s) to broadcast from
2193         /// [`ChannelMonitor::get_latest_holder_commitment_txn`].
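             ///
             /// A minimal usage sketch (not compiled here); skipping the broadcast is typically only
             /// appropriate when the local state may be stale, e.g. after restoring an old backup:
             ///
             /// ```ignore
             /// channel_manager.force_close_without_broadcasting_txn(&channel_id, &counterparty_node_id)?;
             /// // If broadcasting later becomes safe, the latest local commitment transaction(s) can
             /// // still be fetched via `ChannelMonitor::get_latest_holder_commitment_txn`.
             /// ```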
2194         pub fn force_close_without_broadcasting_txn(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey)
2195         -> Result<(), APIError> {
2196                 self.force_close_sending_error(channel_id, counterparty_node_id, false)
2197         }
2198
2199         /// Force close all channels, immediately broadcasting the latest local commitment transaction
2200         /// for each to the chain and rejecting new HTLCs on each.
2201         pub fn force_close_all_channels_broadcasting_latest_txn(&self) {
2202                 for chan in self.list_channels() {
2203                         let _ = self.force_close_broadcasting_latest_txn(&chan.channel_id, &chan.counterparty.node_id);
2204                 }
2205         }
2206
2207         /// Force close all channels rejecting new HTLCs on each but without broadcasting the latest
2208         /// local transaction(s).
2209         pub fn force_close_all_channels_without_broadcasting_txn(&self) {
2210                 for chan in self.list_channels() {
2211                         let _ = self.force_close_without_broadcasting_txn(&chan.channel_id, &chan.counterparty.node_id);
2212                 }
2213         }
2214
2215         fn construct_recv_pending_htlc_info(&self, hop_data: msgs::OnionHopData, shared_secret: [u8; 32],
2216                 payment_hash: PaymentHash, amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>) -> Result<PendingHTLCInfo, ReceiveError>
2217         {
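                      // The failure codes below come from BOLT 4: 18 is `final_incorrect_cltv_expiry`, 19 is
                      // `final_incorrect_htlc_amount`, and 0x4000 | 15 (PERM | 15) is
                      // `incorrect_or_unknown_payment_details`, which is also used for too-soon expiries so
                      // that we don't leak whether the payment details were recognized.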
2218                 // final_incorrect_cltv_expiry
2219                 if hop_data.outgoing_cltv_value > cltv_expiry {
2220                         return Err(ReceiveError {
2221                                 msg: "Upstream node set CLTV to less than the CLTV set by the sender",
2222                                 err_code: 18,
2223                                 err_data: cltv_expiry.to_be_bytes().to_vec()
2224                         })
2225                 }
2226                 // final_expiry_too_soon
2227                 // We have to have some headroom to broadcast on chain if we have the preimage, so make sure
2228                 // we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
2229                 //
2230                 // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
2231                 // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
2232                 // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
2233                 let current_height: u32 = self.best_block.read().unwrap().height();
2234                 if (hop_data.outgoing_cltv_value as u64) <= current_height as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1 {
2235                         let mut err_data = Vec::with_capacity(12);
2236                         err_data.extend_from_slice(&amt_msat.to_be_bytes());
2237                         err_data.extend_from_slice(&current_height.to_be_bytes());
2238                         return Err(ReceiveError {
2239                                 err_code: 0x4000 | 15, err_data,
2240                                 msg: "The final CLTV expiry is too soon to handle",
2241                         });
2242                 }
2243                 if hop_data.amt_to_forward > amt_msat {
2244                         return Err(ReceiveError {
2245                                 err_code: 19,
2246                                 err_data: amt_msat.to_be_bytes().to_vec(),
2247                                 msg: "Upstream node sent less than we were supposed to receive in payment",
2248                         });
2249                 }
2250
2251                 let routing = match hop_data.format {
2252                         msgs::OnionHopDataFormat::NonFinalNode { .. } => {
2253                                 return Err(ReceiveError {
2254                                         err_code: 0x4000|22,
2255                                         err_data: Vec::new(),
2256                                         msg: "Got non-final data with an HMAC of 0",
2257                                 });
2258                         },
2259                         msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage } => {
2260                                 if payment_data.is_some() && keysend_preimage.is_some() {
2261                                         return Err(ReceiveError {
2262                                                 err_code: 0x4000|22,
2263                                                 err_data: Vec::new(),
2264                                                 msg: "We don't support MPP keysend payments",
2265                                         });
2266                                 } else if let Some(data) = payment_data {
2267                                         PendingHTLCRouting::Receive {
2268                                                 payment_data: data,
2269                                                 incoming_cltv_expiry: hop_data.outgoing_cltv_value,
2270                                                 phantom_shared_secret,
2271                                         }
2272                                 } else if let Some(payment_preimage) = keysend_preimage {
2273                                         // We need to check that the sender knows the keysend preimage before processing this
2274                                         // payment further. Otherwise, an intermediary routing hop forwarding non-keysend-HTLC X
2275                                         // could discover the final destination of X, by probing the adjacent nodes on the route
2276                                         // with a keysend payment of identical payment hash to X and observing the processing
2277                                         // time discrepancies due to a hash collision with X.
2278                                         let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
2279                                         if hashed_preimage != payment_hash {
2280                                                 return Err(ReceiveError {
2281                                                         err_code: 0x4000|22,
2282                                                         err_data: Vec::new(),
2283                                                         msg: "Payment preimage didn't match payment hash",
2284                                                 });
2285                                         }
2286
2287                                         PendingHTLCRouting::ReceiveKeysend {
2288                                                 payment_preimage,
2289                                                 incoming_cltv_expiry: hop_data.outgoing_cltv_value,
2290                                         }
2291                                 } else {
2292                                         return Err(ReceiveError {
2293                                                 err_code: 0x4000|0x2000|3,
2294                                                 err_data: Vec::new(),
2295                                                 msg: "We require payment_secrets",
2296                                         });
2297                                 }
2298                         },
2299                 };
2300                 Ok(PendingHTLCInfo {
2301                         routing,
2302                         payment_hash,
2303                         incoming_shared_secret: shared_secret,
2304                         incoming_amt_msat: Some(amt_msat),
2305                         outgoing_amt_msat: hop_data.amt_to_forward,
2306                         outgoing_cltv_value: hop_data.outgoing_cltv_value,
2307                 })
2308         }
2309
2310         fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> PendingHTLCStatus {
2311                 macro_rules! return_malformed_err {
2312                         ($msg: expr, $err_code: expr) => {
2313                                 {
2314                                         log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
2315                                         return PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
2316                                                 channel_id: msg.channel_id,
2317                                                 htlc_id: msg.htlc_id,
2318                                                 sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
2319                                                 failure_code: $err_code,
2320                                         }));
2321                                 }
2322                         }
2323                 }
2324
2325                 if msg.onion_routing_packet.public_key.is_err() {
2326                         return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
2327                 }
2328
2329                 let shared_secret = self.node_signer.ecdh(
2330                         Recipient::Node, &msg.onion_routing_packet.public_key.unwrap(), None
2331                 ).unwrap().secret_bytes();
2332
2333                 if msg.onion_routing_packet.version != 0 {
2334                         //TODO: The spec doesn't indicate whether we should only hash hop_data here (and in
2335                         //other sha256_of_onion error data packets) or the entire onion_routing_packet. Either
2336                         //way, the hash doesn't really serve any purpose - in the case of hashing all data, the
2337                         //receiving node would have to brute force to figure out which version was put in the
2338                         //packet by the node that sent us the message; in the case of hashing the hop_data, the
2339                         //node knows the HMAC matched, so they already know what is there...
2340                         return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
2341                 }
2342                 macro_rules! return_err {
2343                         ($msg: expr, $err_code: expr, $data: expr) => {
2344                                 {
2345                                         log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
2346                                         return PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
2347                                                 channel_id: msg.channel_id,
2348                                                 htlc_id: msg.htlc_id,
2349                                                 reason: HTLCFailReason::reason($err_code, $data.to_vec())
2350                                                         .get_encrypted_failure_packet(&shared_secret, &None),
2351                                         }));
2352                                 }
2353                         }
2354                 }
2355
2356                 let next_hop = match onion_utils::decode_next_payment_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
2357                         Ok(res) => res,
2358                         Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
2359                                 return_malformed_err!(err_msg, err_code);
2360                         },
2361                         Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
2362                                 return_err!(err_msg, err_code, &[0; 0]);
2363                         },
2364                 };
2365
2366                 let pending_forward_info = match next_hop {
2367                         onion_utils::Hop::Receive(next_hop_data) => {
2368                                 // OUR PAYMENT!
2369                                 match self.construct_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None) {
2370                                         Ok(info) => {
2371                                                 // Note that we could obviously respond immediately with an update_fulfill_htlc
2372                                                 // message, however that would leak that we are the recipient of this payment, so
2373                                                 // instead we stay symmetric with the forwarding case, only responding (after a
2374                                                 // delay) once they've sent us a commitment_signed!
2375                                                 PendingHTLCStatus::Forward(info)
2376                                         },
2377                                         Err(ReceiveError { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
2378                                 }
2379                         },
2380                         onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
2381                                 let new_pubkey = msg.onion_routing_packet.public_key.unwrap();
2382                                 let outgoing_packet = msgs::OnionPacket {
2383                                         version: 0,
2384                                         public_key: onion_utils::next_hop_packet_pubkey(&self.secp_ctx, new_pubkey, &shared_secret),
2385                                         hop_data: new_packet_bytes,
2386                                         hmac: next_hop_hmac.clone(),
2387                                 };
2388
2389                                 let short_channel_id = match next_hop_data.format {
2390                                         msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
2391                                         msgs::OnionHopDataFormat::FinalNode { .. } => {
2392                                                 return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
2393                                         },
2394                                 };
2395
2396                                 PendingHTLCStatus::Forward(PendingHTLCInfo {
2397                                         routing: PendingHTLCRouting::Forward {
2398                                                 onion_packet: outgoing_packet,
2399                                                 short_channel_id,
2400                                         },
2401                                         payment_hash: msg.payment_hash.clone(),
2402                                         incoming_shared_secret: shared_secret,
2403                                         incoming_amt_msat: Some(msg.amount_msat),
2404                                         outgoing_amt_msat: next_hop_data.amt_to_forward,
2405                                         outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
2406                                 })
2407                         }
2408                 };
2409
2410                 if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref outgoing_amt_msat, ref outgoing_cltv_value, .. }) = &pending_forward_info {
2411                         // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
2412                         // with a short_channel_id of 0. This is important as various things later assume
2413                         // short_channel_id is non-0 in any ::Forward.
2414                         if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
2415                                 if let Some((err, mut code, chan_update)) = loop {
2416                                         let id_option = self.short_to_chan_info.read().unwrap().get(short_channel_id).cloned();
2417                                         let forwarding_chan_info_opt = match id_option {
2418                                                 None => { // unknown_next_peer
2419                                                         // Note that this is likely a timing oracle for detecting whether an scid is a
2420                                                         // phantom or an intercept.
2421                                                         if (self.default_configuration.accept_intercept_htlcs &&
2422                                                            fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)) ||
2423                                                            fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id, &self.genesis_hash)
2424                                                         {
2425                                                                 None
2426                                                         } else {
2427                                                                 break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
2428                                                         }
2429                                                 },
2430                                                 Some((cp_id, id)) => Some((cp_id.clone(), id.clone())),
2431                                         };
2432                                         let chan_update_opt = if let Some((counterparty_node_id, forwarding_id)) = forwarding_chan_info_opt {
2433                                                 let per_peer_state = self.per_peer_state.read().unwrap();
2434                                                 let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
2435                                                 if peer_state_mutex_opt.is_none() {
2436                                                         break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
2437                                                 }
2438                                                 let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
2439                                                 let peer_state = &mut *peer_state_lock;
2440                                                 let chan = match peer_state.channel_by_id.get_mut(&forwarding_id) {
2441                                                         None => {
2442                                                                 // Channel was removed. The short_to_chan_info and channel_by_id maps
2443                                                                 // have no consistency guarantees.
2444                                                                 break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
2445                                                         },
2446                                                         Some(chan) => chan
2447                                                 };
2448                                                 if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
2449                                                         // Note that the behavior here should be identical to the above block - we
2450                                                         // should NOT reveal the existence or non-existence of a private channel if
2451                                                         // we don't allow forwards outbound over them.
2452                                                         break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
2453                                                 }
2454                                                 if chan.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.outbound_scid_alias() {
2455                                                         // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
2456                                                         // "refuse to forward unless the SCID alias was used", so we pretend
2457                                                         // we don't have the channel here.
2458                                                         break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
2459                                                 }
2460                                                 let chan_update_opt = self.get_channel_update_for_onion(*short_channel_id, chan).ok();
2461
2462                                                 // Note that we could technically not return an error here and just hope that the
2463                                                 // connection is reestablished or the monitor updated by the time we get around
2464                                                 // to doing the actual forward, but it's better to fail early if we can and
2465                                                 // hopefully an attacker trying to path-trace payments cannot make this occur
2466                                                 // on a small/per-node/per-channel scale.
2467                                                 if !chan.is_live() { // channel_disabled
2468                                                         // If the channel_update we're going to return is disabled (i.e. the
2469                                                         // peer has been disabled for some time), return `channel_disabled`,
2470                                                         // otherwise return `temporary_channel_failure`.
2471                                                         if chan_update_opt.as_ref().map(|u| u.contents.flags & 2 == 2).unwrap_or(false) {
2472                                                                 break Some(("Forwarding channel has been disconnected for some time.", 0x1000 | 20, chan_update_opt));
2473                                                         } else {
2474                                                                 break Some(("Forwarding channel is not in a ready state.", 0x1000 | 7, chan_update_opt));
2475                                                         }
2476                                                 }
2477                                                 if *outgoing_amt_msat < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
2478                                                         break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
2479                                                 }
2480                                                 if let Err((err, code)) = chan.htlc_satisfies_config(&msg, *outgoing_amt_msat, *outgoing_cltv_value) {
2481                                                         break Some((err, code, chan_update_opt));
2482                                                 }
2483                                                 chan_update_opt
2484                                         } else {
2485                                                 if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + MIN_CLTV_EXPIRY_DELTA as u64 {
2486                                                         // We really should set `incorrect_cltv_expiry` here but as we're not
2487                                                         // forwarding over a real channel we can't generate a channel_update
2488                                                         // for it. Instead we just return a generic temporary_node_failure.
2489                                                         break Some((
2490                                                                 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
2491                                                                 0x2000 | 2, None,
2492                                                         ));
2493                                                 }
2494                                                 None
2495                                         };
2496
2497                                         let cur_height = self.best_block.read().unwrap().height() + 1;
2498                                         // Theoretically, the channel counterparty shouldn't send us an HTLC expiring now,
2499                                         // but we want to be robust with respect to counterparty packet sanitization (see
2500                                         // HTLC_FAIL_BACK_BUFFER rationale).
2501                                         if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
2502                                                 break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt));
2503                                         }
2504                                         if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
2505                                                 break Some(("CLTV expiry is too far in the future", 21, None));
2506                                         }
2507                                         // If the HTLC expires ~now, don't bother trying to forward it to our
2508                                         // counterparty. They should fail it anyway, but we don't want to bother with
2509                                         // the round-trips or risk them deciding they definitely want the HTLC and
2510                                         // force-closing to ensure they get it if we're offline.
2511                                         // We previously had a much more aggressive check here which tried to ensure
2512                                         // our counterparty receives an HTLC which has *our* risk threshold met on it,
2513                                         // but there is no need to do that, and since we're a bit conservative with our
2514                                         // risk threshold it just results in failing to forward payments.
2515                                         if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
2516                                                 break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt));
2517                                         }
2518
2519                                         break None;
2520                                 }
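                                      // When the failure code has the UPDATE bit (0x1000) set, BOLT 4 has us append
                                      // any code-specific fields, then a u16 length and the referenced
                                      // `channel_update`; LDK includes the 2-byte message type in that length and
                                      // writes the type ahead of the update body (see the TODO below re: bolts #791).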
2521                                 {
2522                                         let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
2523                                         if let Some(chan_update) = chan_update {
2524                                                 if code == 0x1000 | 11 || code == 0x1000 | 12 {
2525                                                         msg.amount_msat.write(&mut res).expect("Writes cannot fail");
2526                                                 }
2527                                                 else if code == 0x1000 | 13 {
2528                                                         msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
2529                                                 }
2530                                                 else if code == 0x1000 | 20 {
2531                                                         // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
2532                                                         0u16.write(&mut res).expect("Writes cannot fail");
2533                                                 }
2534                                                 (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
2535                                                 msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
2536                                                 chan_update.write(&mut res).expect("Writes cannot fail");
2537                                         } else if code & 0x1000 == 0x1000 {
2538                                                 // If we're trying to return an error that requires a `channel_update` but
2539                                                 // we're forwarding to a phantom or intercept "channel" (i.e. cannot
2540                                                 // generate an update), just use the generic "temporary_node_failure"
2541                                                 // instead.
2542                                                 code = 0x2000 | 2;
2543                                         }
2544                                         return_err!(err, code, &res.0[..]);
2545                                 }
2546                         }
2547                 }
2548
2549                 pending_forward_info
2550         }
2551
2552         /// Gets the current [`channel_update`] for the given channel. Because this first checks that
2553         /// the channel is public, it is the variant which should be called whenever the result is
2554         /// going to be passed out in a [`MessageSendEvent::BroadcastChannelUpdate`] event.
2555         ///
2556         /// Note that in [`internal_closing_signed`], this function is called without the `peer_state`
2557         /// corresponding to the channel's counterparty locked, as the channel has been removed from
2558         /// storage and the `peer_state` lock has been dropped.
2559         ///
2560         /// [`channel_update`]: msgs::ChannelUpdate
2561         /// [`internal_closing_signed`]: Self::internal_closing_signed
2562         fn get_channel_update_for_broadcast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
2563                 if !chan.should_announce() {
2564                         return Err(LightningError {
2565                                 err: "Cannot broadcast a channel_update for a private channel".to_owned(),
2566                                 action: msgs::ErrorAction::IgnoreError
2567                         });
2568                 }
2569                 if chan.get_short_channel_id().is_none() {
2570                         return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError});
2571                 }
2572                 log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
2573                 self.get_channel_update_for_unicast(chan)
2574         }
2575
2576         /// Gets the current [`channel_update`] for the given channel. This does not check if the channel
2577         /// is public (only returning an `Err` if the channel does not yet have an assigned SCID),
2578         /// and thus MUST NOT be called unless the recipient of the resulting message has already
2579         /// provided evidence that they know about the existence of the channel.
2580         ///
2581         /// Note that through [`internal_closing_signed`], this function is called without the
2582         /// `peer_state` corresponding to the channel's counterparty locked, as the channel has been
2583         /// removed from storage and the `peer_state` lock has been dropped.
2584         ///
2585         /// [`channel_update`]: msgs::ChannelUpdate
2586         /// [`internal_closing_signed`]: Self::internal_closing_signed
2587         fn get_channel_update_for_unicast(&self, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
2588                 log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
2589                 let short_channel_id = match chan.get_short_channel_id().or(chan.latest_inbound_scid_alias()) {
2590                         None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
2591                         Some(id) => id,
2592                 };
2593
2594                 self.get_channel_update_for_onion(short_channel_id, chan)
2595         }
2596         fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
2597                 log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
2598                 let were_node_one = self.our_network_pubkey.serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
2599
2600                 let enabled = chan.is_usable() && match chan.channel_update_status() {
2601                         ChannelUpdateStatus::Enabled => true,
2602                         ChannelUpdateStatus::DisabledStaged(_) => true,
2603                         ChannelUpdateStatus::Disabled => false,
2604                         ChannelUpdateStatus::EnabledStaged(_) => false,
2605                 };
2606
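                      // Per BOLT 7, the low bit of `flags` encodes the direction (0 when we are the
                      // lexicographically-lesser "node 1") and the next bit encodes the disable flag.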
2607                 let unsigned = msgs::UnsignedChannelUpdate {
2608                         chain_hash: self.genesis_hash,
2609                         short_channel_id,
2610                         timestamp: chan.get_update_time_counter(),
2611                         flags: (!were_node_one) as u8 | ((!enabled as u8) << 1),
2612                         cltv_expiry_delta: chan.get_cltv_expiry_delta(),
2613                         htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
2614                         htlc_maximum_msat: chan.get_announced_htlc_max_msat(),
2615                         fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
2616                         fee_proportional_millionths: chan.get_fee_proportional_millionths(),
2617                         excess_data: Vec::new(),
2618                 };
2619                 // Panic on failure, to signal that LDK should be restarted to retry signing the
2620                 // `ChannelUpdate`. If we returned an error and the `node_signer` cannot provide a
2621                 // signature for whatever reason, we wouldn't be able to receive inbound payments
2622                 // through the corresponding channel.
2623                 let sig = self.node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelUpdate(&unsigned)).unwrap();
2624
2625                 Ok(msgs::ChannelUpdate {
2626                         signature: sig,
2627                         contents: unsigned
2628                 })
2629         }
2630
2631         #[cfg(test)]
2632         pub(crate) fn test_send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
2633                 let _lck = self.total_consistency_lock.read().unwrap();
2634                 self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv_bytes)
2635         }
2636
2637         fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_hash: &PaymentHash, recipient_onion: RecipientOnionFields, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>, session_priv_bytes: [u8; 32]) -> Result<(), APIError> {
2638                 // The top-level caller should hold the total_consistency_lock read lock.
2639                 debug_assert!(self.total_consistency_lock.try_write().is_err());
2640
2641                 log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
2642                 let prng_seed = self.entropy_source.get_secure_random_bytes();
2643                 let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
2644
2645                 let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
2646                         .map_err(|_| APIError::InvalidRoute{err: "Pubkey along hop was maliciously selected".to_owned()})?;
2647                 let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, recipient_onion, cur_height, keysend_preimage)?;
2648                 if onion_utils::route_size_insane(&onion_payloads) {
2649                         return Err(APIError::InvalidRoute{err: "Route size too large considering onion data".to_owned()});
2650                 }
2651                 let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
2652
2653                 let err: Result<(), _> = loop {
2654                         let (counterparty_node_id, id) = match self.short_to_chan_info.read().unwrap().get(&path.first().unwrap().short_channel_id) {
2655                                 None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
2656                                 Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
2657                         };
2658
2659                         let per_peer_state = self.per_peer_state.read().unwrap();
2660                         let peer_state_mutex = per_peer_state.get(&counterparty_node_id)
2661                                 .ok_or_else(|| APIError::ChannelUnavailable{err: "No peer matching the path's first hop found!".to_owned() })?;
2662                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
2663                         let peer_state = &mut *peer_state_lock;
2664                         if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(id) {
2665                                 if !chan.get().is_live() {
2666                                         return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()});
2667                                 }
2668                                 let funding_txo = chan.get().get_funding_txo().unwrap();
2669                                 let send_res = chan.get_mut().send_htlc_and_commit(htlc_msat, payment_hash.clone(),
2670                                         htlc_cltv, HTLCSource::OutboundRoute {
2671                                                 path: path.clone(),
2672                                                 session_priv: session_priv.clone(),
2673                                                 first_hop_htlc_msat: htlc_msat,
2674                                                 payment_id,
2675                                         }, onion_packet, &self.logger);
2676                                 match break_chan_entry!(self, send_res, chan) {
2677                                         Some(monitor_update) => {
2678                                                 let update_id = monitor_update.update_id;
2679                                                 let update_res = self.chain_monitor.update_channel(funding_txo, monitor_update);
2680                                                 if let Err(e) = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan) {
2681                                                         break Err(e);
2682                                                 }
2683                                                 if update_res == ChannelMonitorUpdateStatus::InProgress {
2684                                                         // Note that MonitorUpdateInProgress here indicates (per function
2685                                                         // docs) that we will resend the commitment update once monitor
2686                                                         // updating completes. Therefore, we must return an error
2687                                                         // indicating that it is unsafe to retry the payment wholesale,
2688                                                         // which we do in the send_payment check for
2689                                                         // MonitorUpdateInProgress, below.
2690                                                         return Err(APIError::MonitorUpdateInProgress);
2691                                                 }
2692                                         },
2693                                         None => { },
2694                                 }
2695                         } else {
2696                                 // The channel was likely removed after we fetched the id from the
2697                                 // `short_to_chan_info` map, but before we successfully locked the
2698                                 // `channel_by_id` map.
				// This can occur as no consistency guarantees exist between the two maps.
2700                                 return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()});
2701                         }
2702                         return Ok(());
2703                 };
2704
2705                 match handle_error!(self, err, path.first().unwrap().pubkey) {
2706                         Ok(_) => unreachable!(),
2707                         Err(e) => {
2708                                 Err(APIError::ChannelUnavailable { err: e.err })
2709                         },
2710                 }
2711         }
2712
2713         /// Sends a payment along a given route.
2714         ///
	/// Value parameters are provided via the last hop in `route`; see the documentation for
	/// [`RouteHop`] fields for more info.
	///
	/// May generate one or more [`UpdateHTLCs`] message events on success, which should be relayed
	/// (e.g. via [`PeerManager::process_events`]).
2720         ///
2721         /// # Avoiding Duplicate Payments
2722         ///
2723         /// If a pending payment is currently in-flight with the same [`PaymentId`] provided, this
2724         /// method will error with an [`APIError::InvalidRoute`]. Note, however, that once a payment
2725         /// is no longer pending (either via [`ChannelManager::abandon_payment`], or handling of an
2726         /// [`Event::PaymentSent`] or [`Event::PaymentFailed`]) LDK will not stop you from sending a
2727         /// second payment with the same [`PaymentId`].
2728         ///
2729         /// Thus, in order to ensure duplicate payments are not sent, you should implement your own
2730         /// tracking of payments, including state to indicate once a payment has completed. Because you
2731         /// should also ensure that [`PaymentHash`]es are not re-used, for simplicity, you should
2732         /// consider using the [`PaymentHash`] as the key for tracking payments. In that case, the
2733         /// [`PaymentId`] should be a copy of the [`PaymentHash`] bytes.
2734         ///
2735         /// Additionally, in the scenario where we begin the process of sending a payment, but crash
2736         /// before `send_payment` returns (or prior to [`ChannelMonitorUpdate`] persistence if you're
2737         /// using [`ChannelMonitorUpdateStatus::InProgress`]), the payment may be lost on restart. See
2738         /// [`ChannelManager::list_recent_payments`] for more information.
2739         ///
2740         /// # Possible Error States on [`PaymentSendFailure`]
2741         ///
	/// Each path may have a different return value, and [`PaymentSendFailure`] may contain a `Vec`
	/// with each entry matching the corresponding-index entry in the route paths; see
	/// [`PaymentSendFailure`] for more info.
2745         ///
2746         /// In general, a path may raise:
2747         ///  * [`APIError::InvalidRoute`] when an invalid route or forwarding parameter (cltv_delta, fee,
2748         ///    node public key) is specified.
2749         ///  * [`APIError::ChannelUnavailable`] if the next-hop channel is not available for updates
2750         ///    (including due to previous monitor update failure or new permanent monitor update
2751         ///    failure).
2752         ///  * [`APIError::MonitorUpdateInProgress`] if a new monitor update failure prevented sending the
2753         ///    relevant updates.
2754         ///
2755         /// Note that depending on the type of the [`PaymentSendFailure`] the HTLC may have been
2756         /// irrevocably committed to on our end. In such a case, do NOT retry the payment with a
2757         /// different route unless you intend to pay twice!
2758         ///
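	/// # Example
	///
	/// A minimal sketch (marked `ignore` since `channel_manager`, `route`, `payment_hash`, and
	/// `payment_secret` are assumed to already exist in the surrounding code), deriving the
	/// [`PaymentId`] from the [`PaymentHash`] bytes as recommended above:
	///
	/// ```ignore
	/// // Reusing the payment hash bytes as the payment id gives us the duplicate-payment
	/// // protection described above for free.
	/// let payment_id = PaymentId(payment_hash.0);
	/// let recipient_onion = RecipientOnionFields::secret_only(payment_secret);
	/// match channel_manager.send_payment_with_route(&route, payment_hash, recipient_onion, payment_id) {
	/// 	Ok(()) => { /* HTLC(s) in flight; await `Event::PaymentSent`/`Event::PaymentFailed` */ },
	/// 	Err(e) => { /* consult the `PaymentSendFailure` variant before retrying */ },
	/// }
	/// ```
	///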
2759         /// [`Event::PaymentSent`]: events::Event::PaymentSent
2760         /// [`Event::PaymentFailed`]: events::Event::PaymentFailed
2761         /// [`UpdateHTLCs`]: events::MessageSendEvent::UpdateHTLCs
2762         /// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
2763         /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
2764         pub fn send_payment_with_route(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
2765                 let best_block_height = self.best_block.read().unwrap().height();
2766                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2767                 self.pending_outbound_payments
2768                         .send_payment_with_route(route, payment_hash, recipient_onion, payment_id, &self.entropy_source, &self.node_signer, best_block_height,
2769                                 |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
2770                                 self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
2771         }
2772
2773         /// Similar to [`ChannelManager::send_payment`], but will automatically find a route based on
2774         /// `route_params` and retry failed payment paths based on `retry_strategy`.
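	///
	/// A minimal sketch (marked `ignore`; `channel_manager`, `payment_hash`, `recipient_onion`,
	/// and `route_params` are assumed to already exist in the surrounding code):
	///
	/// ```ignore
	/// // Retry failed paths up to three times before abandoning the payment.
	/// channel_manager.send_payment(payment_hash, recipient_onion, PaymentId(payment_hash.0),
	/// 	route_params, Retry::Attempts(3))?;
	/// ```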
2775         pub fn send_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<(), RetryableSendFailure> {
2776                 let best_block_height = self.best_block.read().unwrap().height();
2777                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2778                 self.pending_outbound_payments
2779                         .send_payment(payment_hash, recipient_onion, payment_id, retry_strategy, route_params,
2780                                 &self.router, self.list_usable_channels(), || self.compute_inflight_htlcs(),
2781                                 &self.entropy_source, &self.node_signer, best_block_height, &self.logger,
2782                                 &self.pending_events,
2783                                 |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
2784                                 self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
2785         }
2786
2787         #[cfg(test)]
2788         pub(super) fn test_send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, keysend_preimage: Option<PaymentPreimage>, payment_id: PaymentId, recv_value_msat: Option<u64>, onion_session_privs: Vec<[u8; 32]>) -> Result<(), PaymentSendFailure> {
2789                 let best_block_height = self.best_block.read().unwrap().height();
2790                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2791                 self.pending_outbound_payments.test_send_payment_internal(route, payment_hash, recipient_onion, keysend_preimage, payment_id, recv_value_msat, onion_session_privs, &self.node_signer, best_block_height,
2792                         |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
2793                         self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
2794         }
2795
2796         #[cfg(test)]
2797         pub(crate) fn test_add_new_pending_payment(&self, payment_hash: PaymentHash, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route: &Route) -> Result<Vec<[u8; 32]>, PaymentSendFailure> {
2798                 let best_block_height = self.best_block.read().unwrap().height();
2799                 self.pending_outbound_payments.test_add_new_pending_payment(payment_hash, recipient_onion, payment_id, route, None, &self.entropy_source, best_block_height)
2800         }
2801
2803         /// Signals that no further retries for the given payment should occur. Useful if you have a
2804         /// pending outbound payment with retries remaining, but wish to stop retrying the payment before
2805         /// retries are exhausted.
2806         ///
2807         /// If no [`Event::PaymentFailed`] event had been generated before, one will be generated as soon
2808         /// as there are no remaining pending HTLCs for this payment.
2809         ///
2810         /// Note that calling this method does *not* prevent a payment from succeeding. You must still
2811         /// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
2812         /// determine the ultimate status of a payment.
2813         ///
2814         /// If an [`Event::PaymentFailed`] event is generated and we restart without this
2815         /// [`ChannelManager`] having been persisted, another [`Event::PaymentFailed`] may be generated.
2816         ///
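	/// A minimal sketch (marked `ignore`; `channel_manager` and `payment_id` are assumed to
	/// already exist in the surrounding code):
	///
	/// ```ignore
	/// channel_manager.abandon_payment(payment_id);
	/// // The payment may still succeed! Wait for a later `Event::PaymentFailed` (or
	/// // `Event::PaymentSent`) to learn the final outcome.
	/// ```
	///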
2817         /// [`Event::PaymentFailed`]: events::Event::PaymentFailed
2818         /// [`Event::PaymentSent`]: events::Event::PaymentSent
2819         pub fn abandon_payment(&self, payment_id: PaymentId) {
2820                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2821                 self.pending_outbound_payments.abandon_payment(payment_id, PaymentFailureReason::UserAbandoned, &self.pending_events);
2822         }
2823
2824         /// Send a spontaneous payment, which is a payment that does not require the recipient to have
2825         /// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify
2826         /// the preimage, it must be a cryptographically secure random value that no intermediate node
2827         /// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
2828         /// never reach the recipient.
2829         ///
	/// See the [`send_payment`] documentation for more details on the return value of this
	/// function and the idempotency guarantees provided by the [`PaymentId`] key.
2832         ///
2833         /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
2834         /// [`send_payment`] for more information about the risks of duplicate preimage usage.
2835         ///
2836         /// Note that `route` must have exactly one path.
2837         ///
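	/// A minimal sketch (marked `ignore`; `channel_manager`, `route`, and `payment_id` are
	/// assumed to already exist in the surrounding code):
	///
	/// ```ignore
	/// // Passing `None` lets LDK generate a fresh, cryptographically secure preimage.
	/// let payment_hash = channel_manager.send_spontaneous_payment(
	/// 	&route, None, RecipientOnionFields::spontaneous_empty(), payment_id)?;
	/// ```
	///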
2838         /// [`send_payment`]: Self::send_payment
2839         pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId) -> Result<PaymentHash, PaymentSendFailure> {
2840                 let best_block_height = self.best_block.read().unwrap().height();
2841                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2842                 self.pending_outbound_payments.send_spontaneous_payment_with_route(
2843                         route, payment_preimage, recipient_onion, payment_id, &self.entropy_source,
2844                         &self.node_signer, best_block_height,
2845                         |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
2846                         self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
2847         }
2848
2849         /// Similar to [`ChannelManager::send_spontaneous_payment`], but will automatically find a route
2850         /// based on `route_params` and retry failed payment paths based on `retry_strategy`.
2851         ///
2852         /// See [`PaymentParameters::for_keysend`] for help in constructing `route_params` for spontaneous
2853         /// payments.
2854         ///
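	/// A minimal sketch (marked `ignore`; `channel_manager`, `payment_id`, and `route_params`
	/// (built via [`PaymentParameters::for_keysend`]) are assumed to already exist in the
	/// surrounding code):
	///
	/// ```ignore
	/// let payment_hash = channel_manager.send_spontaneous_payment_with_retry(
	/// 	None, RecipientOnionFields::spontaneous_empty(), payment_id, route_params,
	/// 	Retry::Attempts(3))?;
	/// ```
	///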
2855         /// [`PaymentParameters::for_keysend`]: crate::routing::router::PaymentParameters::for_keysend
2856         pub fn send_spontaneous_payment_with_retry(&self, payment_preimage: Option<PaymentPreimage>, recipient_onion: RecipientOnionFields, payment_id: PaymentId, route_params: RouteParameters, retry_strategy: Retry) -> Result<PaymentHash, RetryableSendFailure> {
2857                 let best_block_height = self.best_block.read().unwrap().height();
2858                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2859                 self.pending_outbound_payments.send_spontaneous_payment(payment_preimage, recipient_onion,
2860                         payment_id, retry_strategy, route_params, &self.router, self.list_usable_channels(),
			|| self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
2862                         &self.logger, &self.pending_events,
2863                         |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
2864                         self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
2865         }
2866
2867         /// Send a payment that is probing the given route for liquidity. We calculate the
2868         /// [`PaymentHash`] of probes based on a static secret and a random [`PaymentId`], which allows
2869         /// us to easily discern them from real payments.
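	///
	/// A minimal sketch (marked `ignore`; `channel_manager` and a previously-computed `route` are
	/// assumed to already exist in the surrounding code):
	///
	/// ```ignore
	/// // Probe the first path of the route; the returned hash/id let us match up the
	/// // resulting `Event::ProbeSuccessful`/`Event::ProbeFailed`.
	/// let (probe_hash, probe_id) = channel_manager.send_probe(route.paths[0].clone())?;
	/// ```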
2870         pub fn send_probe(&self, hops: Vec<RouteHop>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
2871                 let best_block_height = self.best_block.read().unwrap().height();
2872                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2873                 self.pending_outbound_payments.send_probe(hops, self.probing_cookie_secret, &self.entropy_source, &self.node_signer, best_block_height,
2874                         |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
2875                         self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv))
2876         }
2877
2878         /// Returns whether a payment with the given [`PaymentHash`] and [`PaymentId`] is, in fact, a
2879         /// payment probe.
2880         #[cfg(test)]
2881         pub(crate) fn payment_is_probe(&self, payment_hash: &PaymentHash, payment_id: &PaymentId) -> bool {
2882                 outbound_payment::payment_is_probe(payment_hash, payment_id, self.probing_cookie_secret)
2883         }
2884
2885         /// Handles the generation of a funding transaction, optionally (for tests) with a function
2886         /// which checks the correctness of the funding transaction given the associated channel.
2887         fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<<SP::Target as SignerProvider>::Signer>, &Transaction) -> Result<OutPoint, APIError>>(
2888                 &self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
2889         ) -> Result<(), APIError> {
2890                 let per_peer_state = self.per_peer_state.read().unwrap();
2891                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
2892                         .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
2893
2894                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
2895                 let peer_state = &mut *peer_state_lock;
2896                 let (msg, chan) = match peer_state.channel_by_id.remove(temporary_channel_id) {
2897                         Some(mut chan) => {
2898                                 let funding_txo = find_funding_output(&chan, &funding_transaction)?;
2899
2900                                 let funding_res = chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
2901                                         .map_err(|e| if let ChannelError::Close(msg) = e {
2902                                                 MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
2903                                         } else { unreachable!(); });
2904                                 match funding_res {
2905                                         Ok(funding_msg) => (funding_msg, chan),
2906                                         Err(_) => {
2907                                                 mem::drop(peer_state_lock);
2908                                                 mem::drop(per_peer_state);
2909
2910                                                 let _ = handle_error!(self, funding_res, chan.get_counterparty_node_id());
2911                                                 return Err(APIError::ChannelUnavailable {
2912                                                         err: "Signer refused to sign the initial commitment transaction".to_owned()
2913                                                 });
2914                                         },
2915                                 }
2916                         },
2917                         None => {
2918                                 return Err(APIError::ChannelUnavailable {
2919                                         err: format!(
2920                                                 "Channel with id {} not found for the passed counterparty node_id {}",
2921                                                 log_bytes!(*temporary_channel_id), counterparty_node_id),
2922                                 })
2923                         },
2924                 };
2925
2926                 peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
2927                         node_id: chan.get_counterparty_node_id(),
2928                         msg,
2929                 });
2930                 match peer_state.channel_by_id.entry(chan.channel_id()) {
2931                         hash_map::Entry::Occupied(_) => {
2932                                 panic!("Generated duplicate funding txid?");
2933                         },
2934                         hash_map::Entry::Vacant(e) => {
2935                                 let mut id_to_peer = self.id_to_peer.lock().unwrap();
2936                                 if id_to_peer.insert(chan.channel_id(), chan.get_counterparty_node_id()).is_some() {
2937                                         panic!("id_to_peer map already contained funding txid, which shouldn't be possible");
2938                                 }
2939                                 e.insert(chan);
2940                         }
2941                 }
2942                 Ok(())
2943         }
2944
2945         #[cfg(test)]
2946         pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
2947                 self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |_, tx| {
2948                         Ok(OutPoint { txid: tx.txid(), index: output_index })
2949                 })
2950         }
2951
2952         /// Call this upon creation of a funding transaction for the given channel.
2953         ///
	/// Returns an [`APIError::APIMisuseError`] if the `funding_transaction` spends non-SegWit outputs
2955         /// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`].
2956         ///
2957         /// Returns [`APIError::APIMisuseError`] if the funding transaction is not final for propagation
2958         /// across the p2p network.
2959         ///
2960         /// Returns [`APIError::ChannelUnavailable`] if a funding transaction has already been provided
2961         /// for the channel or if the channel has been closed as indicated by [`Event::ChannelClosed`].
2962         ///
	/// May panic if the output found in the funding transaction duplicates that of some other
	/// channel (note that this should be trivially prevented by using unique funding transaction
	/// keys per-channel).
2966         ///
2967         /// Do NOT broadcast the funding transaction yourself. When we have safely received our
2968         /// counterparty's signature the funding transaction will automatically be broadcast via the
2969         /// [`BroadcasterInterface`] provided when this `ChannelManager` was constructed.
2970         ///
2971         /// Note that this includes RBF or similar transaction replacement strategies - lightning does
2972         /// not currently support replacing a funding transaction on an existing channel. Instead,
2973         /// create a new channel with a conflicting funding transaction.
2974         ///
	/// Note that to keep miner incentives aligned with moving the blockchain forward, we recommend
	/// that the wallet software generating the funding transaction apply anti-fee-sniping, as
	/// implemented by the Bitcoin Core wallet. See <https://bitcoinops.org/en/topics/fee-sniping/>
	/// for more details.
2979         ///
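	/// A minimal sketch (marked `ignore`; `channel_manager` is assumed to already exist, and
	/// `wallet.create_funding_transaction` is a hypothetical stand-in for your on-chain wallet):
	///
	/// ```ignore
	/// // In the `Event::FundingGenerationReady` handler:
	/// let funding_tx = wallet.create_funding_transaction(channel_value_satoshis, &output_script)?;
	/// // Hand the fully-signed transaction to LDK; do NOT broadcast it yourself.
	/// channel_manager.funding_transaction_generated(
	/// 	&temporary_channel_id, &counterparty_node_id, funding_tx)?;
	/// ```
	///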
2980         /// [`Event::FundingGenerationReady`]: crate::events::Event::FundingGenerationReady
2981         /// [`Event::ChannelClosed`]: crate::events::Event::ChannelClosed
2982         pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
2983                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2984
2985                 for inp in funding_transaction.input.iter() {
2986                         if inp.witness.is_empty() {
2987                                 return Err(APIError::APIMisuseError {
2988                                         err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
2989                                 });
2990                         }
2991                 }
2992                 {
2993                         let height = self.best_block.read().unwrap().height();
			// Transactions are evaluated as final by network mempools at the next block. However, the
			// modules constituting our Lightning node might not have perfectly synchronized views of
			// the blockchain. Thus, if the wallet module's view is ahead of LDK's, allow one extra
			// block of headroom.
2997                         if !funding_transaction.input.iter().all(|input| input.sequence == Sequence::MAX) && LockTime::from(funding_transaction.lock_time).is_block_height() && funding_transaction.lock_time.0 > height + 2 {
2998                                 return Err(APIError::APIMisuseError {
2999                                         err: "Funding transaction absolute timelock is non-final".to_owned()
3000                                 });
3001                         }
3002                 }
3003                 self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
3004                         let mut output_index = None;
3005                         let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
3006                         for (idx, outp) in tx.output.iter().enumerate() {
3007                                 if outp.script_pubkey == expected_spk && outp.value == chan.get_value_satoshis() {
3008                                         if output_index.is_some() {
3009                                                 return Err(APIError::APIMisuseError {
3010                                                         err: "Multiple outputs matched the expected script and value".to_owned()
3011                                                 });
3012                                         }
3013                                         if idx > u16::max_value() as usize {
3014                                                 return Err(APIError::APIMisuseError {
3015                                                         err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
3016                                                 });
3017                                         }
3018                                         output_index = Some(idx as u16);
3019                                 }
3020                         }
3021                         if output_index.is_none() {
3022                                 return Err(APIError::APIMisuseError {
3023                                         err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
3024                                 });
3025                         }
3026                         Ok(OutPoint { txid: tx.txid(), index: output_index.unwrap() })
3027                 })
3028         }
3029
3030         /// Atomically updates the [`ChannelConfig`] for the given channels.
3031         ///
	/// Once the updates are applied, each eligible channel (advertised with a known short channel
	/// ID and a change in [`forwarding_fee_proportional_millionths`], [`forwarding_fee_base_msat`],
	/// or [`cltv_expiry_delta`]) will have a [`BroadcastChannelUpdate`] event generated containing
	/// the new [`ChannelUpdate`] message, which should be broadcast to the network.
3036         ///
3037         /// Returns [`ChannelUnavailable`] when a channel is not found or an incorrect
3038         /// `counterparty_node_id` is provided.
3039         ///
3040         /// Returns [`APIMisuseError`] when a [`cltv_expiry_delta`] update is to be applied with a value
3041         /// below [`MIN_CLTV_EXPIRY_DELTA`].
3042         ///
3043         /// If an error is returned, none of the updates should be considered applied.
3044         ///
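	/// A minimal sketch (marked `ignore`; `channel_manager`, `counterparty_node_id`,
	/// `channel_id`, and the channel's `current_config` are assumed to already exist in the
	/// surrounding code):
	///
	/// ```ignore
	/// // Start from the channel's current config and only change the fields we care about.
	/// let mut config = current_config.clone();
	/// config.forwarding_fee_base_msat = 1_000;
	/// config.forwarding_fee_proportional_millionths = 200;
	/// channel_manager.update_channel_config(&counterparty_node_id, &[channel_id], &config)?;
	/// ```
	///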
3045         /// [`forwarding_fee_proportional_millionths`]: ChannelConfig::forwarding_fee_proportional_millionths
3046         /// [`forwarding_fee_base_msat`]: ChannelConfig::forwarding_fee_base_msat
3047         /// [`cltv_expiry_delta`]: ChannelConfig::cltv_expiry_delta
3048         /// [`BroadcastChannelUpdate`]: events::MessageSendEvent::BroadcastChannelUpdate
3049         /// [`ChannelUpdate`]: msgs::ChannelUpdate
3050         /// [`ChannelUnavailable`]: APIError::ChannelUnavailable
3051         /// [`APIMisuseError`]: APIError::APIMisuseError
3052         pub fn update_channel_config(
3053                 &self, counterparty_node_id: &PublicKey, channel_ids: &[[u8; 32]], config: &ChannelConfig,
3054         ) -> Result<(), APIError> {
3055                 if config.cltv_expiry_delta < MIN_CLTV_EXPIRY_DELTA {
3056                         return Err(APIError::APIMisuseError {
3057                                 err: format!("The chosen CLTV expiry delta is below the minimum of {}", MIN_CLTV_EXPIRY_DELTA),
3058                         });
3059                 }
3060
3061                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(
3062                         &self.total_consistency_lock, &self.persistence_notifier,
3063                 );
3064                 let per_peer_state = self.per_peer_state.read().unwrap();
3065                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
3066                         .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
3067                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3068                 let peer_state = &mut *peer_state_lock;
3069                 for channel_id in channel_ids {
3070                         if !peer_state.channel_by_id.contains_key(channel_id) {
3071                                 return Err(APIError::ChannelUnavailable {
3072                                         err: format!("Channel with ID {} was not found for the passed counterparty_node_id {}", log_bytes!(*channel_id), counterparty_node_id),
3073                                 });
3074                         }
3075                 }
3076                 for channel_id in channel_ids {
3077                         let channel = peer_state.channel_by_id.get_mut(channel_id).unwrap();
3078                         if !channel.update_config(config) {
3079                                 continue;
3080                         }
3081                         if let Ok(msg) = self.get_channel_update_for_broadcast(channel) {
3082                                 peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg });
3083                         } else if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
3084                                 peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
3085                                         node_id: channel.get_counterparty_node_id(),
3086                                         msg,
3087                                 });
3088                         }
3089                 }
3090                 Ok(())
3091         }
3092
3093         /// Attempts to forward an intercepted HTLC over the provided channel id and with the provided
3094         /// amount to forward. Should only be called in response to an [`HTLCIntercepted`] event.
3095         ///
3096         /// Intercepted HTLCs can be useful for Lightning Service Providers (LSPs) to open a just-in-time
3097         /// channel to a receiving node if the node lacks sufficient inbound liquidity.
3098         ///
3099         /// To make use of intercepted HTLCs, set [`UserConfig::accept_intercept_htlcs`] and use
3100         /// [`ChannelManager::get_intercept_scid`] to generate short channel id(s) to put in the
3101         /// receiver's invoice route hints. These route hints will signal to LDK to generate an
3102         /// [`HTLCIntercepted`] event when it receives the forwarded HTLC, and this method or
3103         /// [`ChannelManager::fail_intercepted_htlc`] MUST be called in response to the event.
3104         ///
3105         /// Note that LDK does not enforce fee requirements in `amt_to_forward_msat`, and will not stop
3106         /// you from forwarding more than you received.
3107         ///
3108         /// Errors if the event was not handled in time, in which case the HTLC was automatically failed
3109         /// backwards.
3110         ///
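	/// A minimal sketch (marked `ignore`; `jit_channel_id`, `receiver_node_id`, and
	/// `our_fee_msat` are assumptions standing in for your LSP logic):
	///
	/// ```ignore
	/// // In the `Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. }`
	/// // handler, after opening a just-in-time channel to the receiver:
	/// channel_manager.forward_intercepted_htlc(intercept_id, &jit_channel_id,
	/// 	receiver_node_id, expected_outbound_amount_msat - our_fee_msat)?;
	/// ```
	///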
3111         /// [`UserConfig::accept_intercept_htlcs`]: crate::util::config::UserConfig::accept_intercept_htlcs
3112         /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
3113         // TODO: when we move to deciding the best outbound channel at forward time, only take
3114         // `next_node_id` and not `next_hop_channel_id`
3115         pub fn forward_intercepted_htlc(&self, intercept_id: InterceptId, next_hop_channel_id: &[u8; 32], next_node_id: PublicKey, amt_to_forward_msat: u64) -> Result<(), APIError> {
3116                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
3117
3118                 let next_hop_scid = {
3119                         let peer_state_lock = self.per_peer_state.read().unwrap();
3120                         let peer_state_mutex = peer_state_lock.get(&next_node_id)
3121                                 .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", next_node_id) })?;
3122                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3123                         let peer_state = &mut *peer_state_lock;
3124                         match peer_state.channel_by_id.get(next_hop_channel_id) {
3125                                 Some(chan) => {
3126                                         if !chan.is_usable() {
3127                                                 return Err(APIError::ChannelUnavailable {
3128                                                         err: format!("Channel with id {} not fully established", log_bytes!(*next_hop_channel_id))
3129                                                 })
3130                                         }
3131                                         chan.get_short_channel_id().unwrap_or(chan.outbound_scid_alias())
3132                                 },
3133                                 None => return Err(APIError::ChannelUnavailable {
3134                                         err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*next_hop_channel_id), next_node_id)
3135                                 })
3136                         }
3137                 };
3138
3139                 let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
3140                         .ok_or_else(|| APIError::APIMisuseError {
3141                                 err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
3142                         })?;
3143
3144                 let routing = match payment.forward_info.routing {
3145                         PendingHTLCRouting::Forward { onion_packet, .. } => {
3146                                 PendingHTLCRouting::Forward { onion_packet, short_channel_id: next_hop_scid }
3147                         },
3148                         _ => unreachable!() // Only `PendingHTLCRouting::Forward`s are intercepted
3149                 };
3150                 let pending_htlc_info = PendingHTLCInfo {
3151                         outgoing_amt_msat: amt_to_forward_msat, routing, ..payment.forward_info
3152                 };
3153
3154                 let mut per_source_pending_forward = [(
3155                         payment.prev_short_channel_id,
3156                         payment.prev_funding_outpoint,
3157                         payment.prev_user_channel_id,
3158                         vec![(pending_htlc_info, payment.prev_htlc_id)]
3159                 )];
3160                 self.forward_htlcs(&mut per_source_pending_forward);
3161                 Ok(())
3162         }
3163
	/// Fails the intercepted HTLC indicated by `intercept_id`. Should only be called in response to
	/// an [`HTLCIntercepted`] event. See [`ChannelManager::forward_intercepted_htlc`].
3166         ///
3167         /// Errors if the event was not handled in time, in which case the HTLC was automatically failed
3168         /// backwards.
3169         ///
3170         /// [`HTLCIntercepted`]: events::Event::HTLCIntercepted
3171         pub fn fail_intercepted_htlc(&self, intercept_id: InterceptId) -> Result<(), APIError> {
3172                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
3173
3174                 let payment = self.pending_intercepted_htlcs.lock().unwrap().remove(&intercept_id)
3175                         .ok_or_else(|| APIError::APIMisuseError {
3176                                 err: format!("Payment with intercept id {} not found", log_bytes!(intercept_id.0))
3177                         })?;
3178
3179                 if let PendingHTLCRouting::Forward { short_channel_id, .. } = payment.forward_info.routing {
3180                         let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
3181                                 short_channel_id: payment.prev_short_channel_id,
3182                                 outpoint: payment.prev_funding_outpoint,
3183                                 htlc_id: payment.prev_htlc_id,
3184                                 incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret,
3185                                 phantom_shared_secret: None,
3186                         });
3187
3188                         let failure_reason = HTLCFailReason::from_failure_code(0x4000 | 10);
3189                         let destination = HTLCDestination::UnknownNextHop { requested_forward_scid: short_channel_id };
3190                         self.fail_htlc_backwards_internal(&htlc_source, &payment.forward_info.payment_hash, &failure_reason, destination);
3191                 } else { unreachable!() } // Only `PendingHTLCRouting::Forward`s are intercepted
3192
3193                 Ok(())
3194         }
3195
	/// Processes HTLCs which are pending, waiting on a random forward delay.
	///
	/// Should only really ever be called in response to a [`PendingHTLCsForwardable`] event.
	/// Will likely generate further events.
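	///
	/// A minimal sketch (marked `ignore`; `channel_manager` and a `timer` utility with a
	/// hypothetical `schedule_once` method are assumed to exist in the surrounding code):
	///
	/// ```ignore
	/// // In the `Event::PendingHTLCsForwardable { time_forwardable }` handler, wait at least
	/// // `time_forwardable` (a random delay which helps obscure forwarding timing) before
	/// // processing:
	/// timer.schedule_once(time_forwardable, || channel_manager.process_pending_htlc_forwards());
	/// ```
	///
	/// [`PendingHTLCsForwardable`]: events::Event::PendingHTLCsForwardable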
3200         pub fn process_pending_htlc_forwards(&self) {
3201                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
3202
3203                 let mut new_events = Vec::new();
3204                 let mut failed_forwards = Vec::new();
3205                 let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
3206                 {
3207                         let mut forward_htlcs = HashMap::new();
3208                         mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap());
3209
3210                         for (short_chan_id, mut pending_forwards) in forward_htlcs {
3211                                 if short_chan_id != 0 {
3212                                         macro_rules! forwarding_channel_not_found {
3213                                                 () => {
3214                                                         for forward_info in pending_forwards.drain(..) {
3215                                                                 match forward_info {
3216                                                                         HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
3217                                                                                 prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
3218                                                                                 forward_info: PendingHTLCInfo {
3219                                                                                         routing, incoming_shared_secret, payment_hash, outgoing_amt_msat,
3220                                                                                         outgoing_cltv_value, incoming_amt_msat: _
3221                                                                                 }
3222                                                                         }) => {
3223                                                                                 macro_rules! failure_handler {
3224                                                                                         ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => {
3225                                                                                                 log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
3226
3227                                                                                                 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
3228                                                                                                         short_channel_id: prev_short_channel_id,
3229                                                                                                         outpoint: prev_funding_outpoint,
3230                                                                                                         htlc_id: prev_htlc_id,
3231                                                                                                         incoming_packet_shared_secret: incoming_shared_secret,
3232                                                                                                         phantom_shared_secret: $phantom_ss,
3233                                                                                                 });
3234
3235                                                                                                 let reason = if $next_hop_unknown {
3236                                                                                                         HTLCDestination::UnknownNextHop { requested_forward_scid: short_chan_id }
3237                                                                                                 } else {
3238                                                                                                         HTLCDestination::FailedPayment{ payment_hash }
3239                                                                                                 };
3240
3241                                                                                                 failed_forwards.push((htlc_source, payment_hash,
3242                                                                                                         HTLCFailReason::reason($err_code, $err_data),
3243                                                                                                         reason
3244                                                                                                 ));
3245                                                                                                 continue;
3246                                                                                         }
3247                                                                                 }
3248                                                                                 macro_rules! fail_forward {
3249                                                                                         ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
3250                                                                                                 {
3251                                                                                                         failure_handler!($msg, $err_code, $err_data, $phantom_ss, true);
3252                                                                                                 }
3253                                                                                         }
3254                                                                                 }
3255                                                                                 macro_rules! failed_payment {
3256                                                                                         ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
3257                                                                                                 {
3258                                                                                                         failure_handler!($msg, $err_code, $err_data, $phantom_ss, false);
3259                                                                                                 }
3260                                                                                         }
3261                                                                                 }
3262                                                                                 if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
3263                                                                                         let phantom_pubkey_res = self.node_signer.get_node_id(Recipient::PhantomNode);
3264                                                                                         if phantom_pubkey_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id, &self.genesis_hash) {
3265                                                                                                 let phantom_shared_secret = self.node_signer.ecdh(Recipient::PhantomNode, &onion_packet.public_key.unwrap(), None).unwrap().secret_bytes();
3266                                                                                                 let next_hop = match onion_utils::decode_next_payment_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
3267                                                                                                         Ok(res) => res,
3268                                                                                                         Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
3269                                                                                                                 let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
3270                                                                                                                 // In this scenario, the phantom would have sent us an
3271                                                                                                                 // `update_fail_malformed_htlc`, meaning here we encrypt the error as
3272                                                                                                                 // if it came from us (the second-to-last hop) but contains the sha256
3273                                                                                                                 // of the onion.
3274                                                                                                                 failed_payment!(err_msg, err_code, sha256_of_onion.to_vec(), None);
3275                                                                                                         },
3276                                                                                                         Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
3277                                                                                                                 failed_payment!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
3278                                                                                                         },
3279                                                                                                 };
3280                                                                                                 match next_hop {
3281                                                                                                         onion_utils::Hop::Receive(hop_data) => {
3282                                                                                                                 match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, Some(phantom_shared_secret)) {
3283                                                                                                                         Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])),
3284                                                                                                                         Err(ReceiveError { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret))
3285                                                                                                                 }
3286                                                                                                         },
3287                                                                                                         _ => panic!(),
3288                                                                                                 }
3289                                                                                         } else {
3290                                                                                                 fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
3291                                                                                         }
3292                                                                                 } else {
3293                                                                                         fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
3294                                                                                 }
3295                                                                         },
3296                                                                         HTLCForwardInfo::FailHTLC { .. } => {
3297                                                                                 // Channel went away before we could fail it. This implies
3298                                                                                 // the channel is now on chain and our counterparty is
3299                                                                                 // trying to broadcast the HTLC-Timeout, but that's their
3300                                                                                 // problem, not ours.
3301                                                                         }
3302                                                                 }
3303                                                         }
3304                                                 }
3305                                         }
3306                                         let (counterparty_node_id, forward_chan_id) = match self.short_to_chan_info.read().unwrap().get(&short_chan_id) {
3307                                                 Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
3308                                                 None => {
3309                                                         forwarding_channel_not_found!();
3310                                                         continue;
3311                                                 }
3312                                         };
3313                                         let per_peer_state = self.per_peer_state.read().unwrap();
3314                                         let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
3315                                         if peer_state_mutex_opt.is_none() {
3316                                                 forwarding_channel_not_found!();
3317                                                 continue;
3318                                         }
3319                                         let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
3320                                         let peer_state = &mut *peer_state_lock;
3321                                         match peer_state.channel_by_id.entry(forward_chan_id) {
3322                                                 hash_map::Entry::Vacant(_) => {
3323                                                         forwarding_channel_not_found!();
3324                                                         continue;
3325                                                 },
3326                                                 hash_map::Entry::Occupied(mut chan) => {
3327                                                         for forward_info in pending_forwards.drain(..) {
3328                                                                 match forward_info {
3329                                                                         HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
3330                                                                                 prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id: _,
3331                                                                                 forward_info: PendingHTLCInfo {
3332                                                                                         incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value,
3333                                                                                         routing: PendingHTLCRouting::Forward { onion_packet, .. }, incoming_amt_msat: _,
3334                                                                                 },
3335                                                                         }) => {
3336                                                                                 log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
3337                                                                                 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
3338                                                                                         short_channel_id: prev_short_channel_id,
3339                                                                                         outpoint: prev_funding_outpoint,
3340                                                                                         htlc_id: prev_htlc_id,
3341                                                                                         incoming_packet_shared_secret: incoming_shared_secret,
3342                                                                                         // Phantom payments are only PendingHTLCRouting::Receive.
3343                                                                                         phantom_shared_secret: None,
3344                                                                                 });
3345                                                                                 if let Err(e) = chan.get_mut().queue_add_htlc(outgoing_amt_msat,
3346                                                                                         payment_hash, outgoing_cltv_value, htlc_source.clone(),
3347                                                                                         onion_packet, &self.logger)
3348                                                                                 {
3349                                                                                         if let ChannelError::Ignore(msg) = e {
3350                                                                                                 log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
3351                                                                                         } else {
3352                                                                                                 panic!("Stated return value requirements in send_htlc() were not met");
3353                                                                                         }
3354                                                                                         let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
3355                                                                                         failed_forwards.push((htlc_source, payment_hash,
3356                                                                                                 HTLCFailReason::reason(failure_code, data),
3357                                                                                                 HTLCDestination::NextHopChannel { node_id: Some(chan.get().get_counterparty_node_id()), channel_id: forward_chan_id }
3358                                                                                         ));
3359                                                                                         continue;
3360                                                                                 }
3361                                                                         },
3362                                                                         HTLCForwardInfo::AddHTLC { .. } => {
3363                                                                                 panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
3364                                                                         },
3365                                                                         HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
3366                                                                                 log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
3367                                                                                 if let Err(e) = chan.get_mut().queue_fail_htlc(
3368                                                                                         htlc_id, err_packet, &self.logger
3369                                                                                 ) {
3370                                                                                         if let ChannelError::Ignore(msg) = e {
3371                                                                                                 log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
3372                                                                                         } else {
3373                                                                                                 panic!("Stated return value requirements in queue_fail_htlc() were not met");
3374                                                                                         }
3375                                                                                                         // fail-backs are best-effort, we probably already have one
3376                                                                                                         // pending, and if we don't the channel is on the chain and
3377                                                                                                         // sending the HTLC-Timeout is their problem, not ours.
3378                                                                                         continue;
3379                                                                                 }
3380                                                                         },
3381                                                                 }
3382                                                         }
3383                                                 }
3384                                         }
3385                                 } else {
3386                                         for forward_info in pending_forwards.drain(..) {
3387                                                 match forward_info {
3388                                                         HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
3389                                                                 prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id,
3390                                                                 forward_info: PendingHTLCInfo {
3391                                                                         routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat, ..
3392                                                                 }
3393                                                         }) => {
3394                                                                 let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
3395                                                                         PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
3396                                                                                 let _legacy_hop_data = Some(payment_data.clone());
3397                                                                                 (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret)
3398                                                                         },
3399                                                                         PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
3400                                                                                 (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), None, None),
3401                                                                         _ => {
3402                                                                                 panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
3403                                                                         }
3404                                                                 };
3405                                                                 let mut claimable_htlc = ClaimableHTLC {
3406                                                                         prev_hop: HTLCPreviousHopData {
3407                                                                                 short_channel_id: prev_short_channel_id,
3408                                                                                 outpoint: prev_funding_outpoint,
3409                                                                                 htlc_id: prev_htlc_id,
3410                                                                                 incoming_packet_shared_secret: incoming_shared_secret,
3411                                                                                 phantom_shared_secret,
3412                                                                         },
3413                                                                         // We differentiate the received value from the sender-intended value
3414                                                                         // if possible, so that we don't prematurely mark MPP payments complete
3415                                                                         // if routing nodes overpay.
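                                                                        // (For example, if a forwarding node overpays us by 5 msat, `value`
                                                                        // below includes the extra 5 msat while `sender_intended_value`,
                                                                        // taken from the onion, does not.)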
3416                                                                         value: incoming_amt_msat.unwrap_or(outgoing_amt_msat),
3417                                                                         sender_intended_value: outgoing_amt_msat,
3418                                                                         timer_ticks: 0,
3419                                                                         total_value_received: None,
3420                                                                         total_msat: if let Some(data) = &payment_data { data.total_msat } else { outgoing_amt_msat },
3421                                                                         cltv_expiry,
3422                                                                         onion_payload,
3423                                                                 };
3424
3425                                                                 macro_rules! fail_htlc {
3426                                                                         ($htlc: expr, $payment_hash: expr) => {
3427                                                                                 let mut htlc_msat_height_data = $htlc.value.to_be_bytes().to_vec();
3428                                                                                 htlc_msat_height_data.extend_from_slice(
3429                                                                                         &self.best_block.read().unwrap().height().to_be_bytes(),
3430                                                                                 );
3431                                                                                 failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
3432                                                                                                 short_channel_id: $htlc.prev_hop.short_channel_id,
3433                                                                                                 outpoint: prev_funding_outpoint,
3434                                                                                                 htlc_id: $htlc.prev_hop.htlc_id,
3435                                                                                                 incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
3436                                                                                                 phantom_shared_secret,
3437                                                                                         }), payment_hash,
3438                                                                                         HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
3439                                                                                         HTLCDestination::FailedPayment { payment_hash: $payment_hash },
3440                                                                                 ));
3441                                                                         }
3442                                                                 }
3443                                                                 let phantom_shared_secret = claimable_htlc.prev_hop.phantom_shared_secret;
3444                                                                 let mut receiver_node_id = self.our_network_pubkey;
3445                                                                 if phantom_shared_secret.is_some() {
3446                                                                         receiver_node_id = self.node_signer.get_node_id(Recipient::PhantomNode)
3447                                                                                 .expect("Failed to get node_id for phantom node recipient");
3448                                                                 }
3449
3450                                                                 macro_rules! check_total_value {
3451                                                                         ($payment_data: expr, $payment_preimage: expr) => {{
3452                                                                                 let mut payment_claimable_generated = false;
3453                                                                                 let purpose = || {
3454                                                                                         events::PaymentPurpose::InvoicePayment {
3455                                                                                                 payment_preimage: $payment_preimage,
3456                                                                                                 payment_secret: $payment_data.payment_secret,
3457                                                                                         }
3458                                                                                 };
3459                                                                                 let mut claimable_payments = self.claimable_payments.lock().unwrap();
3460                                                                                 if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
3461                                                                                         fail_htlc!(claimable_htlc, payment_hash);
3462                                                                                         continue
3463                                                                                 }
3464                                                                                 let (_, ref mut htlcs) = claimable_payments.claimable_htlcs.entry(payment_hash)
3465                                                                                         .or_insert_with(|| (purpose(), Vec::new()));
3466                                                                                 if htlcs.len() == 1 {
3467                                                                                         if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
3468                                                                                                 log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
3469                                                                                                 fail_htlc!(claimable_htlc, payment_hash);
3470                                                                                                 continue
3471                                                                                         }
3472                                                                                 }
3473                                                                                 let mut total_value = claimable_htlc.sender_intended_value;
3474                                                                                 let mut earliest_expiry = claimable_htlc.cltv_expiry;
3475                                                                                 for htlc in htlcs.iter() {
3476                                                                                         total_value += htlc.sender_intended_value;
3477                                                                                         earliest_expiry = cmp::min(earliest_expiry, htlc.cltv_expiry);
3478                                                                                         match &htlc.onion_payload {
3479                                                                                                 OnionPayload::Invoice { .. } => {
3480                                                                                                         if htlc.total_msat != $payment_data.total_msat {
3481                                                                                                                 log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
3482                                                                                                                         log_bytes!(payment_hash.0), $payment_data.total_msat, htlc.total_msat);
3483                                                                                                                 total_value = msgs::MAX_VALUE_MSAT;
3484                                                                                                         }
3485                                                                                                         if total_value >= msgs::MAX_VALUE_MSAT { break; }
3486                                                                                                 },
3487                                                                                                 _ => unreachable!(),
3488                                                                                         }
3489                                                                                 }
3490                                                                                 // The condition determining whether an MPP is complete must
3491                                                                                 // match exactly the condition used in `timer_tick_occurred`
3492                                                                                 if total_value >= msgs::MAX_VALUE_MSAT {
3493                                                                                         fail_htlc!(claimable_htlc, payment_hash);
3494                                                                                 } else if total_value - claimable_htlc.sender_intended_value >= $payment_data.total_msat {
3495                                                                                         log_trace!(self.logger, "Failing HTLC with payment_hash {} as payment is already claimable",
3496                                                                                                 log_bytes!(payment_hash.0));
3497                                                                                         fail_htlc!(claimable_htlc, payment_hash);
3498                                                                                 } else if total_value >= $payment_data.total_msat {
3499                                                                                         let prev_channel_id = prev_funding_outpoint.to_channel_id();
3500                                                                                         htlcs.push(claimable_htlc);
3501                                                                                         let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum();
3502                                                                                         htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat));
3503                                                                                         new_events.push(events::Event::PaymentClaimable {
3504                                                                                                 receiver_node_id: Some(receiver_node_id),
3505                                                                                                 payment_hash,
3506                                                                                                 purpose: purpose(),
3507                                                                                                 amount_msat,
3508                                                                                                 via_channel_id: Some(prev_channel_id),
3509                                                                                                 via_user_channel_id: Some(prev_user_channel_id),
3510                                                                                                 claim_deadline: Some(earliest_expiry - HTLC_FAIL_BACK_BUFFER),
3511                                                                                         });
3512                                                                                         payment_claimable_generated = true;
3513                                                                                 } else {
3514                                                                                         // Nothing to do - we haven't reached the total
3515                                                                                         // payment value yet, wait until we receive more
3516                                                                                         // MPP parts.
3517                                                                                         htlcs.push(claimable_htlc);
3518                                                                                 }
3519                                                                                 payment_claimable_generated
3520                                                                         }}
3521                                                                 }
3522
3523                                                                 // Check that the payment hash and secret are known. Note that we
3524                                                                 // MUST take care to handle the "unknown payment hash" and
3525                                                                 // "incorrect payment secret" cases here identically or we'd expose
3526                                                                 // that we are the ultimate recipient of the given payment hash.
3527                                                                 // Further, we must not expose whether we have any other HTLCs
3528                                                                 // associated with the same payment_hash pending or not.
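                                                                // (For example, an HTLC carrying a bogus payment secret must fail in
                                                                // exactly the same way as one carrying an unknown payment hash, or
                                                                // the difference would reveal that we are the recipient.)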
3529                                                                 let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
3530                                                                 match payment_secrets.entry(payment_hash) {
3531                                                                         hash_map::Entry::Vacant(_) => {
3532                                                                                 match claimable_htlc.onion_payload {
3533                                                                                         OnionPayload::Invoice { .. } => {
3534                                                                                                 let payment_data = payment_data.unwrap();
3535                                                                                                 let (payment_preimage, min_final_cltv_expiry_delta) = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
3536                                                                                                         Ok(result) => result,
3537                                                                                                         Err(()) => {
3538                                                                                                                 log_trace!(self.logger, "Failing new HTLC with payment_hash {} as payment verification failed", log_bytes!(payment_hash.0));
3539                                                                                                                 fail_htlc!(claimable_htlc, payment_hash);
3540                                                                                                                 continue
3541                                                                                                         }
3542                                                                                                 };
3543                                                                                                 if let Some(min_final_cltv_expiry_delta) = min_final_cltv_expiry_delta {
3544                                                                                                         let expected_min_expiry_height = (self.current_best_block().height() + min_final_cltv_expiry_delta as u32) as u64;
3545                                                                                                         if (cltv_expiry as u64) < expected_min_expiry_height {
3546                                                                                                                 log_trace!(self.logger, "Failing new HTLC with payment_hash {} as its CLTV expiry was too soon (had {}, earliest expected {})",
3547                                                                                                                         log_bytes!(payment_hash.0), cltv_expiry, expected_min_expiry_height);
3548                                                                                                                 fail_htlc!(claimable_htlc, payment_hash);
3549                                                                                                                 continue;
3550                                                                                                         }
3551                                                                                                 }
3552                                                                                                 check_total_value!(payment_data, payment_preimage);
3553                                                                                         },
3554                                                                                         OnionPayload::Spontaneous(preimage) => {
3555                                                                                                 let mut claimable_payments = self.claimable_payments.lock().unwrap();
3556                                                                                                 if claimable_payments.pending_claiming_payments.contains_key(&payment_hash) {
3557                                                                                                         fail_htlc!(claimable_htlc, payment_hash);
3558                                                                                                         continue
3559                                                                                                 }
3560                                                                                                 match claimable_payments.claimable_htlcs.entry(payment_hash) {
3561                                                                                                         hash_map::Entry::Vacant(e) => {
3562                                                                                                                 let amount_msat = claimable_htlc.value;
3563                                                                                                                 claimable_htlc.total_value_received = Some(amount_msat);
3564                                                                                                                 let claim_deadline = Some(claimable_htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER);
3565                                                                                                                 let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
3566                                                                                                                 e.insert((purpose.clone(), vec![claimable_htlc]));
3567                                                                                                                 let prev_channel_id = prev_funding_outpoint.to_channel_id();
3568                                                                                                                 new_events.push(events::Event::PaymentClaimable {
3569                                                                                                                         receiver_node_id: Some(receiver_node_id),
3570                                                                                                                         payment_hash,
3571                                                                                                                         amount_msat,
3572                                                                                                                         purpose,
3573                                                                                                                         via_channel_id: Some(prev_channel_id),
3574                                                                                                                         via_user_channel_id: Some(prev_user_channel_id),
3575                                                                                                                         claim_deadline,
3576                                                                                                                 });
3577                                                                                                         },
3578                                                                                                         hash_map::Entry::Occupied(_) => {
3579                                                                                                                 log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
3580                                                                                                                 fail_htlc!(claimable_htlc, payment_hash);
3581                                                                                                         }
3582                                                                                                 }
3583                                                                                         }
3584                                                                                 }
3585                                                                         },
3586                                                                         hash_map::Entry::Occupied(inbound_payment) => {
3587                                                                                 if payment_data.is_none() {
3588                                                                                         log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
3589                                                                                         fail_htlc!(claimable_htlc, payment_hash);
3590                                                                                         continue
3591                                                                                 };
3592                                                                                 let payment_data = payment_data.unwrap();
3593                                                                                 if inbound_payment.get().payment_secret != payment_data.payment_secret {
3594                                                                                         log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
3595                                                                                         fail_htlc!(claimable_htlc, payment_hash);
3596                                                                                 } else if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() {
3597                                                                                         log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).",
3598                                                                                                 log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
3599                                                                                         fail_htlc!(claimable_htlc, payment_hash);
3600                                                                                 } else {
3601                                                                                         let payment_claimable_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
3602                                                                                         if payment_claimable_generated {
3603                                                                                                 inbound_payment.remove_entry();
3604                                                                                         }
3605                                                                                 }
3606                                                                         },
3607                                                                 };
3608                                                         },
3609                                                         HTLCForwardInfo::FailHTLC { .. } => {
3610                                                                 panic!("Got pending fail of our own HTLC");
3611                                                         }
3612                                                 }
3613                                         }
3614                                 }
3615                         }
3616                 }
3617
3618                 let best_block_height = self.best_block.read().unwrap().height();
3619                 self.pending_outbound_payments.check_retry_payments(&self.router, || self.list_usable_channels(),
3620                         || self.compute_inflight_htlcs(), &self.entropy_source, &self.node_signer, best_block_height,
3621                         &self.pending_events, &self.logger,
3622                         |path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv|
3623                         self.send_payment_along_path(path, payment_hash, recipient_onion, total_value, cur_height, payment_id, keysend_preimage, session_priv));
3624
3625                 for (htlc_source, payment_hash, failure_reason, destination) in failed_forwards.drain(..) {
3626                         self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
3627                 }
3628                 self.forward_htlcs(&mut phantom_receives);
3629
3630                 // Freeing the holding cell here is relatively redundant - in practice we'll do it when we
3631                 // next get a `get_and_clear_pending_msg_events` call, but some tests rely on it, and it's
3632                 // nice to do the work now if we can rather than while we're trying to get messages in the
3633                 // network stack.
3634                 self.check_free_holding_cells();
3635
3636                 if new_events.is_empty() { return }
3637                 let mut events = self.pending_events.lock().unwrap();
3638                 events.append(&mut new_events);
3639         }
3640
3641         /// Free the background events, generally called from `timer_tick_occurred`.
3642         ///
3643         /// Exposed for testing to allow us to process events quickly without generating accidental
3644         /// `BroadcastChannelUpdate` events in `timer_tick_occurred`.
3645         ///
3646         /// Expects the caller to have a `total_consistency_lock` read lock.
3647         fn process_background_events(&self) -> bool {
3648                 let mut background_events = Vec::new();
3649                 mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
3650                 if background_events.is_empty() {
3651                         return false;
3652                 }
3653
3654                 for event in background_events.drain(..) {
3655                         match event {
3656                                 BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
3657                                         // The channel has already been closed, so there's no use worrying about the
3658                                         // monitor update completing.
3659                                         let _ = self.chain_monitor.update_channel(funding_txo, &update);
3660                                 },
3661                         }
3662                 }
3663                 true
3664         }
3665
3666         #[cfg(any(test, feature = "_test_utils"))]
3667         /// Process background events, for functional testing
3668         pub fn test_process_background_events(&self) {
3669                 self.process_background_events();
3670         }
3671
3672         fn update_channel_fee(&self, chan_id: &[u8; 32], chan: &mut Channel<<SP::Target as SignerProvider>::Signer>, new_feerate: u32) -> NotifyOption {
3673                 if !chan.is_outbound() { return NotifyOption::SkipPersist; }
3674                 // If the feerate has decreased by less than half, don't bother
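                // (e.g. at a current feerate of 1,000 sat per 1000-weight, a new estimate of 600 is
                // ignored here, while 500, or any increase, falls through and queues an update below).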
3675                 if new_feerate <= chan.get_feerate_sat_per_1000_weight() && new_feerate * 2 > chan.get_feerate_sat_per_1000_weight() {
3676                         log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
3677                                 log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
3678                         return NotifyOption::SkipPersist;
3679                 }
3680                 if !chan.is_live() {
3681                         log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
3682                                 log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
3683                         return NotifyOption::SkipPersist;
3684                 }
3685                 log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
3686                         log_bytes!(chan_id[..]), chan.get_feerate_sat_per_1000_weight(), new_feerate);
3687
3688                 chan.queue_update_fee(new_feerate, &self.logger);
3689                 NotifyOption::DoPersist
3690         }
3691
3692         #[cfg(fuzzing)]
3693         /// In chanmon_consistency we want to sometimes do the channel fee updates done in
3694         /// timer_tick_occurred, but we can't generate the disabled channel updates it also produces,
3695         /// as the fuzzer considers those a failure (they usually indicate a channel force-close,
3696         /// which is exactly what it wants to detect). Thus, we expose this variant for its benefit.
3697         pub fn maybe_update_chan_fees(&self) {
3698                 PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
3699                         let mut should_persist = NotifyOption::SkipPersist;
3700
3701                         let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
3702
3703                         let per_peer_state = self.per_peer_state.read().unwrap();
3704                         for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
3705                                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3706                                 let peer_state = &mut *peer_state_lock;
3707                                 for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
3708                                         let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
3709                                         if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
3710                                 }
3711                         }
3712
3713                         should_persist
3714                 });
3715         }
3716
3717         /// Performs actions which should happen on startup and roughly once per minute thereafter.
3718         ///
3719         /// This currently includes:
3720         ///  * Increasing or decreasing the on-chain feerate estimates for our outbound channels,
3721         ///  * Broadcasting [`ChannelUpdate`] messages if we've been disconnected from our peer for more
3722         ///    than a minute, informing the network that they should no longer attempt to route over
3723         ///    the channel.
3724         ///  * Expiring a channel's previous [`ChannelConfig`] if necessary to only allow forwarding HTLCs
3725         ///    with the current [`ChannelConfig`].
3726         ///  * Removing peers which have disconnected and no longer have any channels.
3727         ///
3728         /// Note that this may cause reentrancy through [`chain::Watch::update_channel`] calls or feerate
3729         /// estimate fetches.
3730         ///
3731         /// [`ChannelUpdate`]: msgs::ChannelUpdate
3732         /// [`ChannelConfig`]: crate::util::config::ChannelConfig
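        ///
        /// A minimal usage sketch (not part of this crate): driving the timer from a dedicated
        /// background thread, assuming `channel_manager` is an `Arc` around a fully-constructed
        /// [`ChannelManager`]:
        ///
        /// ```ignore
        /// let manager = Arc::clone(&channel_manager);
        /// std::thread::spawn(move || loop {
        ///     std::thread::sleep(std::time::Duration::from_secs(60));
        ///     // Fires fee updates, channel enable/disable gossip, MPP timeouts, etc.
        ///     manager.timer_tick_occurred();
        /// });
        /// ```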
3733         pub fn timer_tick_occurred(&self) {
3734                 PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
3735                         let mut should_persist = NotifyOption::SkipPersist;
3736                         if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
3737
3738                         let new_feerate = self.fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::Normal);
3739
3740                         let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
3741                         let mut timed_out_mpp_htlcs = Vec::new();
3742                         let mut pending_peers_awaiting_removal = Vec::new();
3743                         {
3744                                 let per_peer_state = self.per_peer_state.read().unwrap();
3745                                 for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
3746                                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3747                                         let peer_state = &mut *peer_state_lock;
3748                                         let pending_msg_events = &mut peer_state.pending_msg_events;
3749                                         let counterparty_node_id = *counterparty_node_id;
3750                                         peer_state.channel_by_id.retain(|chan_id, chan| {
3751                                                 let chan_needs_persist = self.update_channel_fee(chan_id, chan, new_feerate);
3752                                                 if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
3753
3754                                                 if let Err(e) = chan.timer_check_closing_negotiation_progress() {
3755                                                         let (needs_close, err) = convert_chan_err!(self, e, chan, chan_id);
3756                                                         handle_errors.push((Err(err), counterparty_node_id));
3757                                                         if needs_close { return false; }
3758                                                 }
3759
3760                                                 match chan.channel_update_status() {
3761                                                         ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(0)),
3762                                                         ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(0)),
3763                                                         ChannelUpdateStatus::DisabledStaged(_) if chan.is_live()
3764                                                                 => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
3765                                                         ChannelUpdateStatus::EnabledStaged(_) if !chan.is_live()
3766                                                                 => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
3767                                                         ChannelUpdateStatus::DisabledStaged(mut n) if !chan.is_live() => {
3768                                                                 n += 1;
3769                                                                 if n >= DISABLE_GOSSIP_TICKS {
3770                                                                         chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
3771                                                                         if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
3772                                                                                 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
3773                                                                                         msg: update
3774                                                                                 });
3775                                                                         }
3776                                                                         should_persist = NotifyOption::DoPersist;
3777                                                                 } else {
3778                                                                         chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged(n));
3779                                                                 }
3780                                                         },
3781                                                         ChannelUpdateStatus::EnabledStaged(mut n) if chan.is_live() => {
3782                                                                 n += 1;
3783                                                                 if n >= ENABLE_GOSSIP_TICKS {
3784                                                                         chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
3785                                                                         if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
3786                                                                                 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
3787                                                                                         msg: update
3788                                                                                 });
3789                                                                         }
3790                                                                         should_persist = NotifyOption::DoPersist;
3791                                                                 } else {
3792                                                                         chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged(n));
3793                                                                 }
3794                                                         },
3795                                                         _ => {},
3796                                                 }
3797
3798                                                 chan.maybe_expire_prev_config();
3799
3800                                                 true
3801                                         });
3802                                         if peer_state.ok_to_remove(true) {
3803                                                 pending_peers_awaiting_removal.push(counterparty_node_id);
3804                                         }
3805                                 }
3806                         }
3807
3808                         // When a peer disconnects but still has channels, the peer's `peer_state` entry in the
3809                         // `per_peer_state` is not removed by the `peer_disconnected` function. If the channels
3810                         // to that peer are later closed while it is still disconnected (i.e. force closed),
3811                         // we therefore need to remove the peer from `peer_state` separately.
3812                         // To avoid having to take the `per_peer_state` `write` lock once the channels are
3813                         // closed, we instead remove such peers awaiting removal here on a timer, to limit the
3814                         // negative effects on parallelism as much as possible.
3815                         if !pending_peers_awaiting_removal.is_empty() {
3816                                 let mut per_peer_state = self.per_peer_state.write().unwrap();
3817                                 for counterparty_node_id in pending_peers_awaiting_removal {
3818                                         match per_peer_state.entry(counterparty_node_id) {
3819                                                 hash_map::Entry::Occupied(entry) => {
3820                                                         // Remove the entry if the peer is still disconnected and we still
3821                                                         // have no channels to the peer.
3822                                                         let remove_entry = {
3823                                                                 let peer_state = entry.get().lock().unwrap();
3824                                                                 peer_state.ok_to_remove(true)
3825                                                         };
3826                                                         if remove_entry {
3827                                                                 entry.remove_entry();
3828                                                         }
3829                                                 },
3830                                                 hash_map::Entry::Vacant(_) => { /* The PeerState has already been removed */ }
3831                                         }
3832                                 }
3833                         }
3834
3835                         self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
3836                                 if htlcs.is_empty() {
3837                                         // This should be unreachable
3838                                         debug_assert!(false);
3839                                         return false;
3840                                 }
3841                                 if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
3842                                         // Check if we've received all the parts we need for an MPP (the value of the parts adds up to total_msat).
3843                                         // If so, we're not going to handle any timeouts of the parts here.
3844                                         // The condition determining whether the MPP is complete must match
3845                                         // exactly the condition used in `process_pending_htlc_forwards`.
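                                        // (e.g. parts of 1_000_000 and 2_000_000 msat received against a
                                        // total_msat of 3_000_000 sum to the total, so the payment is
                                        // complete and we leave its parts alone here.)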
3846                                         if htlcs[0].total_msat <= htlcs.iter().fold(0, |total, htlc| total + htlc.sender_intended_value) {
3847                                                 return true;
3848                                         } else if htlcs.iter_mut().any(|htlc| {
3849                                                 htlc.timer_ticks += 1;
3850                                                 return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
3851                                         }) {
3852                                                 timed_out_mpp_htlcs.extend(htlcs.drain(..).map(|htlc: ClaimableHTLC| (htlc.prev_hop, *payment_hash)));
3853                                                 return false;
3854                                         }
3855                                 }
3856                                 true
3857                         });
3858
3859                         for htlc_source in timed_out_mpp_htlcs.drain(..) {
3860                                 let source = HTLCSource::PreviousHopData(htlc_source.0.clone());
3861                                 let reason = HTLCFailReason::from_failure_code(23); // mpp_timeout, per BOLT 4
3862                                 let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
3863                                 self.fail_htlc_backwards_internal(&source, &htlc_source.1, &reason, receiver);
3864                         }
3865
3866                         for (err, counterparty_node_id) in handle_errors.drain(..) {
3867                                 let _ = handle_error!(self, err, counterparty_node_id);
3868                         }
3869
3870                         self.pending_outbound_payments.remove_stale_resolved_payments(&self.pending_events);
3871
3872                         // Technically we don't need to do this here, but if we have holding cell entries in a
3873                         // channel that need freeing, it's better to do that here and block a background task
3874                         // than block the message queueing pipeline.
3875                         if self.check_free_holding_cells() {
3876                                 should_persist = NotifyOption::DoPersist;
3877                         }
3878
3879                         should_persist
3880                 });
3881         }
3882
3883         /// Indicates that the preimage for `payment_hash` is unknown or the received amount is
3884         /// incorrect after a [`events::Event::PaymentClaimable`] event, failing the HTLC back to its
3885         /// origin and freeing resources along the path (including in our own channel on which we received it).
3886         ///
3887         /// Note that in some cases around unclean shutdown, it is possible the payment may have
3888         /// already been claimed by you via [`ChannelManager::claim_funds`] prior to you seeing (a
3889         /// second copy of) the [`events::Event::PaymentClaimable`] event. Alternatively, the payment
3890         /// may have already been failed automatically by LDK if it was nearing its expiration time.
3891         ///
3892         /// While LDK will never claim a payment automatically on your behalf (i.e. without you calling
3893         /// [`ChannelManager::claim_funds`]), you should still monitor for
3894         /// [`events::Event::PaymentClaimed`] events even for payments you intend to fail, especially on
3895         /// startup during which time claims that were in-progress at shutdown may be replayed.
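             ///
             /// # Example
             ///
             /// A minimal sketch of failing a payment from an event handler. Here `channel_manager`
             /// and `expected_amt_msat` are assumed to be your own initialized [`ChannelManager`]
             /// and bookkeeping, respectively:
             ///
             /// ```ignore
             /// use lightning::events::Event;
             ///
             /// match event {
             ///     Event::PaymentClaimable { payment_hash, amount_msat, .. } => {
             ///         if amount_msat != expected_amt_msat {
             ///             // Wrong amount (or a payment we don't recognize): fail it back.
             ///             channel_manager.fail_htlc_backwards(&payment_hash);
             ///         }
             ///     },
             ///     _ => {},
             /// }
             /// ```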
3896         pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) {
3897                 self.fail_htlc_backwards_with_reason(payment_hash, FailureCode::IncorrectOrUnknownPaymentDetails);
3898         }
3899
3900         /// This is a variant of [`ChannelManager::fail_htlc_backwards`] that allows you to specify the
3901         /// reason for the failure.
3902         ///
3903         /// See [`FailureCode`] for valid failure codes.
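             ///
             /// For example, to report a temporary node-level failure rather than the default
             /// incorrect-payment-details error (a minimal sketch; `channel_manager` is assumed to be
             /// an initialized [`ChannelManager`]):
             ///
             /// ```ignore
             /// channel_manager.fail_htlc_backwards_with_reason(
             ///     &payment_hash, FailureCode::TemporaryNodeFailure);
             /// ```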
3904         pub fn fail_htlc_backwards_with_reason(&self, payment_hash: &PaymentHash, failure_code: FailureCode) {
3905                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
3906
3907                 let removed_source = self.claimable_payments.lock().unwrap().claimable_htlcs.remove(payment_hash);
3908                 if let Some((_, mut sources)) = removed_source {
3909                         for htlc in sources.drain(..) {
3910                                 let reason = self.get_htlc_fail_reason_from_failure_code(failure_code, &htlc);
3911                                 let source = HTLCSource::PreviousHopData(htlc.prev_hop);
3912                                 let receiver = HTLCDestination::FailedPayment { payment_hash: *payment_hash };
3913                                 self.fail_htlc_backwards_internal(&source, payment_hash, &reason, receiver);
3914                         }
3915                 }
3916         }
3917
3918         /// Gets error data to form an [`HTLCFailReason`] given a [`FailureCode`] and [`ClaimableHTLC`].
3919         fn get_htlc_fail_reason_from_failure_code(&self, failure_code: FailureCode, htlc: &ClaimableHTLC) -> HTLCFailReason {
3920                 match failure_code {
3921                         FailureCode::TemporaryNodeFailure => HTLCFailReason::from_failure_code(failure_code as u16),
3922                         FailureCode::RequiredNodeFeatureMissing => HTLCFailReason::from_failure_code(failure_code as u16),
3923                         FailureCode::IncorrectOrUnknownPaymentDetails => {
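                                     // Per BOLT 4, the error data for this failure consists of
                                     // the HTLC amount (u64, big-endian) followed by the current
                                     // block height (u32, big-endian).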
3924                                 let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
3925                                 htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
3926                                 HTLCFailReason::reason(failure_code as u16, htlc_msat_height_data)
3927                         }
3928                 }
3929         }
3930
3931         /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
3932         /// that we want to return and a channel.
3933         ///
3934         /// This is for failures on the channel on which the HTLC was *received*, not for failures
3935         /// forwarding it onward.
3936         fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> (u16, Vec<u8>) {
3937                 // We can't be sure what SCID was used when relaying inbound towards us, so we have to
3938                 // guess somewhat. If it's a public channel, we figure best to just use the real SCID (as
3939                 // we're not leaking that we have a channel with the counterparty), otherwise we try to use
3940                 // an inbound SCID alias before the real SCID.
3941                 let scid_pref = if chan.should_announce() {
3942                         chan.get_short_channel_id().or(chan.latest_inbound_scid_alias())
3943                 } else {
3944                         chan.latest_inbound_scid_alias().or(chan.get_short_channel_id())
3945                 };
3946                 if let Some(scid) = scid_pref {
3947                         self.get_htlc_temp_fail_err_and_data(desired_err_code, scid, chan)
3948                 } else {
3949                         (0x4000|10, Vec::new()) // PERM|unknown_next_peer
3950                 }
3951         }
3952
3954         /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
3955         /// that we want to return and a channel.
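             ///
             /// The error data written below is, per BOLT 4: for `channel_disabled` (`0x1000|20`) two
             /// zero bytes of `disabled_flags`, then, in all cases, a big-endian `u16` length followed
             /// by the type-prefixed, serialized `channel_update`.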
3956         fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<<SP::Target as SignerProvider>::Signer>) -> (u16, Vec<u8>) {
3957                 debug_assert_eq!(desired_err_code & 0x1000, 0x1000);
3958                 if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
3959                         let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
3960                         if desired_err_code == 0x1000 | 20 {
3961                                 // No flags for `disabled_flags` are currently defined so they're always two zero bytes.
3962                                 // See https://github.com/lightning/bolts/blob/341ec84/04-onion-routing.md?plain=1#L1008
3963                                 0u16.write(&mut enc).expect("Writes cannot fail");
3964                         }
3965                         (upd.serialized_length() as u16 + 2).write(&mut enc).expect("Writes cannot fail");
3966                         msgs::ChannelUpdate::TYPE.write(&mut enc).expect("Writes cannot fail");
3967                         upd.write(&mut enc).expect("Writes cannot fail");
3968                         (desired_err_code, enc.0)
3969                 } else {
3970                         // If we fail to get a unicast channel_update, it implies we don't yet have an SCID,
3971                         // which means we really shouldn't have gotten a payment to be forwarded over this
3972                         // channel yet, or if we did it's from a route hint. Either way, returning an error of
3973                         // PERM|unknown_next_peer (0x4000|10) should be fine.
3974                         (0x4000|10, Vec::new())
3975                 }
3976         }
3977
3978         // Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be
3979         // failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
3980         // be surfaced to the user.
3981         fn fail_holding_cell_htlcs(
3982                 &self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32],
3983                 counterparty_node_id: &PublicKey
3984         ) {
3985                 let (failure_code, onion_failure_data) = {
3986                         let per_peer_state = self.per_peer_state.read().unwrap();
3987                         if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
3988                                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
3989                                 let peer_state = &mut *peer_state_lock;
3990                                 match peer_state.channel_by_id.entry(channel_id) {
3991                                         hash_map::Entry::Occupied(chan_entry) => {
3992                                                 self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, chan_entry.get()) // temporary_channel_failure
3993                                         },
3994                                         hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
3995                                 }
3996                         } else { (0x4000|10, Vec::new()) }
3997                 };
3998
3999                 for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
4000                         let reason = HTLCFailReason::reason(failure_code, onion_failure_data.clone());
4001                         let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id };
4002                         self.fail_htlc_backwards_internal(&htlc_src, &payment_hash, &reason, receiver);
4003                 }
4004         }
4005
4006         /// Fails an HTLC backwards to the node which sent it to us.
4007         /// Note that we do not assume that channels corresponding to failed HTLCs are still available.
4008         fn fail_htlc_backwards_internal(&self, source: &HTLCSource, payment_hash: &PaymentHash, onion_error: &HTLCFailReason, destination: HTLCDestination) {
4009                 // Ensure that no peer state channel storage lock is held when calling this function.
4010                 // This ensures that future code doesn't introduce a lock-order requirement for
4011                 // `forward_htlcs` to be locked after the `per_peer_state` peer locks, which would
4012                 // happen if this function were called with any `per_peer_state` peer lock held.
4013                 for (_, peer) in self.per_peer_state.read().unwrap().iter() {
4014                         debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
4015                 }
4016
4017                 //TODO: There is a timing attack here where if a node fails an HTLC back to us they can
4018                 //identify whether we sent it or not based on the (I presume) very different runtime
4019                 //between the branches here. We should make this async and move it into the forward HTLCs
4020                 //timer handling.
4021
4022                 // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
4023                 // from block_connected which may run during initialization prior to the chain_monitor
4024                 // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
4025                 match source {
4026                         HTLCSource::OutboundRoute { ref path, ref session_priv, ref payment_id, .. } => {
4027                                 if self.pending_outbound_payments.fail_htlc(source, payment_hash, onion_error, path,
4028                                         session_priv, payment_id, self.probing_cookie_secret, &self.secp_ctx,
4029                                         &self.pending_events, &self.logger)
4030                                 { self.push_pending_forwards_ev(); }
4031                         },
4032                         HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, ref phantom_shared_secret, ref outpoint }) => {
4033                                 log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with {:?}", log_bytes!(payment_hash.0), onion_error);
4034                                 let err_packet = onion_error.get_encrypted_failure_packet(incoming_packet_shared_secret, phantom_shared_secret);
4035
4036                                 let mut push_forward_ev = false;
4037                                 let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
4038                                 if forward_htlcs.is_empty() {
4039                                         push_forward_ev = true;
4040                                 }
4041                                 match forward_htlcs.entry(*short_channel_id) {
4042                                         hash_map::Entry::Occupied(mut entry) => {
4043                                                 entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet });
4044                                         },
4045                                         hash_map::Entry::Vacant(entry) => {
4046                                                 entry.insert(vec![HTLCForwardInfo::FailHTLC { htlc_id: *htlc_id, err_packet }]);
4047                                         }
4048                                 }
4049                                 mem::drop(forward_htlcs);
4050                                 if push_forward_ev { self.push_pending_forwards_ev(); }
4051                                 let mut pending_events = self.pending_events.lock().unwrap();
4052                                 pending_events.push(events::Event::HTLCHandlingFailed {
4053                                         prev_channel_id: outpoint.to_channel_id(),
4054                                         failed_next_destination: destination,
4055                                 });
4056                         },
4057                 }
4058         }
4059
4060         /// Provides a payment preimage in response to [`Event::PaymentClaimable`], generating any
4061         /// [`MessageSendEvent`]s needed to claim the payment.
4062         ///
4063         /// This method is guaranteed to claim the payment, but only if the current height is
4064         /// strictly below [`Event::PaymentClaimable::claim_deadline`]. To avoid race
4065         /// conditions, you should wait for an [`Event::PaymentClaimed`] before considering the payment
4066         /// successful. It will generally be available in the next [`process_pending_events`] call.
4067         ///
4068         /// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
4069         /// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentClaimable`
4070         /// event matches your expectation. If you fail to do so and call this method, you may provide
4071         /// the sender "proof-of-payment" when they did not fulfill the full expected payment.
4072         ///
4073         /// [`Event::PaymentClaimable`]: crate::events::Event::PaymentClaimable
4074         /// [`Event::PaymentClaimable::claim_deadline`]: crate::events::Event::PaymentClaimable::claim_deadline
4075         /// [`Event::PaymentClaimed`]: crate::events::Event::PaymentClaimed
4076         /// [`process_pending_events`]: EventsProvider::process_pending_events
4077         /// [`create_inbound_payment`]: Self::create_inbound_payment
4078         /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
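             ///
             /// # Example
             ///
             /// A minimal sketch of claiming from an event handler. Here `channel_manager` and
             /// `expected_amt_msat` are assumed to be your own initialized [`ChannelManager`] and
             /// bookkeeping, respectively:
             ///
             /// ```ignore
             /// use lightning::events::{Event, PaymentPurpose};
             ///
             /// match event {
             ///     Event::PaymentClaimable { amount_msat, purpose, .. } => {
             ///         let preimage = match purpose {
             ///             PaymentPurpose::InvoicePayment { payment_preimage, .. } => payment_preimage,
             ///             PaymentPurpose::SpontaneousPayment(preimage) => Some(preimage),
             ///         };
             ///         if let Some(preimage) = preimage {
             ///             if amount_msat == expected_amt_msat {
             ///                 channel_manager.claim_funds(preimage);
             ///             }
             ///         }
             ///     },
             ///     Event::PaymentClaimed { .. } => {
             ///         // Only now is the payment irrevocably claimed.
             ///     },
             ///     _ => {},
             /// }
             /// ```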
4079         pub fn claim_funds(&self, payment_preimage: PaymentPreimage) {
4080                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
4081
4082                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
4083
4084                 let mut sources = {
4085                         let mut claimable_payments = self.claimable_payments.lock().unwrap();
4086                         if let Some((payment_purpose, sources)) = claimable_payments.claimable_htlcs.remove(&payment_hash) {
4087                                 let mut receiver_node_id = self.our_network_pubkey;
4088                                 for htlc in sources.iter() {
4089                                         if htlc.prev_hop.phantom_shared_secret.is_some() {
4090                                                 let phantom_pubkey = self.node_signer.get_node_id(Recipient::PhantomNode)
4091                                                         .expect("Failed to get node_id for phantom node recipient");
4092                                                 receiver_node_id = phantom_pubkey;
4093                                                 break;
4094                                         }
4095                                 }
4096
4097                                 let dup_purpose = claimable_payments.pending_claiming_payments.insert(payment_hash,
4098                                         ClaimingPayment { amount_msat: sources.iter().map(|source| source.value).sum(),
4099                                         payment_purpose, receiver_node_id,
4100                                 });
4101                                 if dup_purpose.is_some() {
4102                                         debug_assert!(false, "Shouldn't get a duplicate pending claim event ever");
4103                                         log_error!(self.logger, "Got a duplicate pending claimable event on payment hash {}! Please report this bug",
4104                                                 log_bytes!(payment_hash.0));
4105                                 }
4106                                 sources
4107                         } else { return; }
4108                 };
4109                 debug_assert!(!sources.is_empty());
4110
4111                 // Just in case one HTLC has been failed between when we generated the `PaymentClaimable`
4112                 // and when we got here we need to check that the amount we're about to claim matches the
4113                 // amount we told the user in the last `PaymentClaimable`. We also do a sanity-check that
4114                 // the MPP parts all have the same `total_msat`.
4115                 let mut claimable_amt_msat = 0;
4116                 let mut prev_total_msat = None;
4117                 let mut expected_amt_msat = None;
4118                 let mut valid_mpp = true;
4119                 let mut errs = Vec::new();
4120                 let per_peer_state = self.per_peer_state.read().unwrap();
4121                 for htlc in sources.iter() {
4122                         if prev_total_msat.is_some() && prev_total_msat != Some(htlc.total_msat) {
4123                                 log_error!(self.logger, "Somehow ended up with an MPP payment with different expected total amounts - this should not be reachable!");
4124                                 debug_assert!(false);
4125                                 valid_mpp = false;
4126                                 break;
4127                         }
4128                         prev_total_msat = Some(htlc.total_msat);
4129
4130                         if expected_amt_msat.is_some() && expected_amt_msat != htlc.total_value_received {
4131                                 log_error!(self.logger, "Somehow ended up with an MPP payment with different received total amounts - this should not be reachable!");
4132                                 debug_assert!(false);
4133                                 valid_mpp = false;
4134                                 break;
4135                         }
4136                         expected_amt_msat = htlc.total_value_received;
4137
4138                         if let OnionPayload::Spontaneous(_) = &htlc.onion_payload {
4139                                 // We don't currently support MPP for spontaneous payments, so just check
4140                                 // that there's one payment here and move on.
4141                                 if sources.len() != 1 {
4142                                         log_error!(self.logger, "Somehow ended up with an MPP spontaneous payment - this should not be reachable!");
4143                                         debug_assert!(false);
4144                                         valid_mpp = false;
4145                                         break;
4146                                 }
4147                         }
4148
4149                         claimable_amt_msat += htlc.value;
4150                 }
4151                 mem::drop(per_peer_state);
4152                 if sources.is_empty() || expected_amt_msat.is_none() {
4153                         self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
4154                         log_info!(self.logger, "Attempted to claim an incomplete payment which no longer had any available HTLCs!");
4155                         return;
4156                 }
4157                 if claimable_amt_msat != expected_amt_msat.unwrap() {
4158                         self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
4159                         log_info!(self.logger, "Attempted to claim an incomplete payment, expected {} msat, had {} available to claim.",
4160                                 expected_amt_msat.unwrap(), claimable_amt_msat);
4161                         return;
4162                 }
4163                 if valid_mpp {
4164                         for htlc in sources.drain(..) {
4165                                 if let Err((pk, err)) = self.claim_funds_from_hop(
4166                                         htlc.prev_hop, payment_preimage,
4167                                         |_| Some(MonitorUpdateCompletionAction::PaymentClaimed { payment_hash }))
4168                                 {
4169                                         if let msgs::ErrorAction::IgnoreError = err.err.action {
4170                                                 // We got a temporary failure updating monitor, but will claim the
4171                                                 // HTLC when the monitor updating is restored (or on chain).
4172                                                 log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
4173                                         } else { errs.push((pk, err)); }
4174                                 }
4175                         }
4176                 }
4177                 if !valid_mpp {
4178                         for htlc in sources.drain(..) {
4179                                 let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
4180                                 htlc_msat_height_data.extend_from_slice(&self.best_block.read().unwrap().height().to_be_bytes());
4181                                 let source = HTLCSource::PreviousHopData(htlc.prev_hop);
4182                                 let reason = HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data); // PERM|incorrect_or_unknown_payment_details
4183                                 let receiver = HTLCDestination::FailedPayment { payment_hash };
4184                                 self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
4185                         }
4186                         self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
4187                 }
4188
4189                 // Now we can handle any errors which were generated.
4190                 for (counterparty_node_id, err) in errs.drain(..) {
4191                         let res: Result<(), _> = Err(err);
4192                         let _ = handle_error!(self, res, counterparty_node_id);
4193                 }
4194         }
4195
4196         fn claim_funds_from_hop<ComplFunc: FnOnce(Option<u64>) -> Option<MonitorUpdateCompletionAction>>(&self,
4197                 prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage, completion_action: ComplFunc)
4198         -> Result<(), (PublicKey, MsgHandleErrInternal)> {
4199                 //TODO: Delay the claimed_funds relaying just like we do outbound relay!
4200
4201                 {
4202                         let per_peer_state = self.per_peer_state.read().unwrap();
4203                         let chan_id = prev_hop.outpoint.to_channel_id();
4204                         let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) {
4205                                 Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()),
4206                                 None => None
4207                         };
4208
4209                         let peer_state_opt = counterparty_node_id_opt.as_ref().map(
4210                                 |counterparty_node_id| per_peer_state.get(counterparty_node_id)
4211                                         .map(|peer_mutex| peer_mutex.lock().unwrap())
4212                         ).unwrap_or(None);
4213
4214                         if let Some(mut peer_state_lock) = peer_state_opt {
4216                                 let peer_state = &mut *peer_state_lock;
4217                                 if let hash_map::Entry::Occupied(mut chan) = peer_state.channel_by_id.entry(chan_id) {
4218                                         let counterparty_node_id = chan.get().get_counterparty_node_id();
4219                                         let fulfill_res = chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger);
4220
4221                                         if let UpdateFulfillCommitFetch::NewClaim { htlc_value_msat, monitor_update } = fulfill_res {
4222                                                 if let Some(action) = completion_action(Some(htlc_value_msat)) {
4223                                                         log_trace!(self.logger, "Tracking monitor update completion action for channel {}: {:?}",
4224                                                                 log_bytes!(chan_id), action);
4225                                                         peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
4226                                                 }
4227                                                 let update_id = monitor_update.update_id;
4228                                                 let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, monitor_update);
4229                                                 let res = handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
4230                                                         peer_state, per_peer_state, chan);
4231                                                 if let Err(e) = res {
4232                                                         // TODO: This is a *critical* error - we probably updated the outbound edge
4233                                                         // of the HTLC's monitor with a preimage. We should retry this monitor
4234                                                         // update over and over again until morale improves.
4235                                                         log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
4236                                                         return Err((counterparty_node_id, e));
4237                                                 }
4238                                         }
4239                                         return Ok(());
4240                                 }
4241                         }
4242                 }
4243                 let preimage_update = ChannelMonitorUpdate {
4244                         update_id: CLOSED_CHANNEL_UPDATE_ID,
4245                         updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
4246                                 payment_preimage,
4247                         }],
4248                 };
4249                 // We update the ChannelMonitor on the backward link, after
4250                 // receiving an `update_fulfill_htlc` from the forward link.
4251                 let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update);
4252                 if update_res != ChannelMonitorUpdateStatus::Completed {
4253                         // TODO: This needs to be handled somehow - if we receive a monitor update
4254                         // with a preimage we *must* somehow manage to propagate it to the upstream
4255                         // channel, or we must have an ability to receive the same event and try
4256                         // again on restart.
4257                         log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
4258                                 payment_preimage, update_res);
4259                 }
4260                 // Note that we do process the completion action here. This totally could be a
4261                 // duplicate claim, but we have no way of knowing without interrogating the
4262                 // `ChannelMonitor` we've provided the above update to. Instead, note that `Event`s are
4263                 // generally always allowed to be duplicative (and it's specifically noted in
4264                 // `PaymentForwarded`).
4265                 self.handle_monitor_update_completion_actions(completion_action(None));
4266                 Ok(())
4267         }
4268
4269         fn finalize_claims(&self, sources: Vec<HTLCSource>) {
4270                 self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
4271         }
4272
4273         fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
4274                 match source {
4275                         HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
4276                                 self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger);
4277                         },
4278                         HTLCSource::PreviousHopData(hop_data) => {
4279                                 let prev_outpoint = hop_data.outpoint;
4280                                 let res = self.claim_funds_from_hop(hop_data, payment_preimage,
4281                                         |htlc_claim_value_msat| {
4282                                                 if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
4283                                                         let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
4284                                                                 Some(claimed_htlc_value - forwarded_htlc_value)
4285                                                         } else { None };
4286
4287                                                         let prev_channel_id = Some(prev_outpoint.to_channel_id());
4288                                                         let next_channel_id = Some(next_channel_id);
4289
4290                                                         Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded {
4291                                                                 fee_earned_msat,
4292                                                                 claim_from_onchain_tx: from_onchain,
4293                                                                 prev_channel_id,
4294                                                                 next_channel_id,
4295                                                                 outbound_amount_forwarded_msat: forwarded_htlc_value_msat,
4296                                                         }})
4297                                                 } else { None }
4298                                         });
4299                                 if let Err((pk, err)) = res {
4300                                         let result: Result<(), _> = Err(err);
4301                                         let _ = handle_error!(self, result, pk);
4302                                 }
4303                         },
4304                 }
4305         }
4306
4307         /// Gets the `node_id` held by this [`ChannelManager`].
4308         pub fn get_our_node_id(&self) -> PublicKey {
4309                 self.our_network_pubkey.clone()
4310         }
4311
4312         fn handle_monitor_update_completion_actions<I: IntoIterator<Item=MonitorUpdateCompletionAction>>(&self, actions: I) {
4313                 for action in actions.into_iter() {
4314                         match action {
4315                                 MonitorUpdateCompletionAction::PaymentClaimed { payment_hash } => {
4316                                         let payment = self.claimable_payments.lock().unwrap().pending_claiming_payments.remove(&payment_hash);
4317                                         if let Some(ClaimingPayment { amount_msat, payment_purpose: purpose, receiver_node_id }) = payment {
4318                                                 self.pending_events.lock().unwrap().push(events::Event::PaymentClaimed {
4319                                                         payment_hash, purpose, amount_msat, receiver_node_id: Some(receiver_node_id),
4320                                                 });
4321                                         }
4322                                 },
4323                                 MonitorUpdateCompletionAction::EmitEvent { event } => {
4324                                         self.pending_events.lock().unwrap().push(event);
4325                                 },
4326                         }
4327                 }
4328         }
4329
4330         /// Handles a channel reentering a functional state, either due to reconnect or a monitor
4331         /// update completion.
4332         fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
4333                 channel: &mut Channel<<SP::Target as SignerProvider>::Signer>, raa: Option<msgs::RevokeAndACK>,
4334                 commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
4335                 pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
4336                 channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
4337         -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> {
4338                 log_trace!(self.logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement",
4339                         log_bytes!(channel.channel_id()),
4340                         if raa.is_some() { "an" } else { "no" },
4341                         if commitment_update.is_some() { "a" } else { "no" }, pending_forwards.len(),
4342                         if funding_broadcastable.is_some() { "" } else { "not " },
4343                         if channel_ready.is_some() { "sending" } else { "without" },
4344                         if announcement_sigs.is_some() { "sending" } else { "without" });
4345
4346                 let mut htlc_forwards = None;
4347
4348                 let counterparty_node_id = channel.get_counterparty_node_id();
4349                 if !pending_forwards.is_empty() {
4350                         htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
4351                                 channel.get_funding_txo().unwrap(), channel.get_user_id(), pending_forwards));
4352                 }
4353
4354                 if let Some(msg) = channel_ready {
4355                         send_channel_ready!(self, pending_msg_events, channel, msg);
4356                 }
4357                 if let Some(msg) = announcement_sigs {
4358                         pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
4359                                 node_id: counterparty_node_id,
4360                                 msg,
4361                         });
4362                 }
4363
4364                 macro_rules! handle_cs { () => {
4365                         if let Some(update) = commitment_update {
4366                                 pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
4367                                         node_id: counterparty_node_id,
4368                                         updates: update,
4369                                 });
4370                         }
4371                 } }
4372                 macro_rules! handle_raa { () => {
4373                         if let Some(revoke_and_ack) = raa {
4374                                 pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
4375                                         node_id: counterparty_node_id,
4376                                         msg: revoke_and_ack,
4377                                 });
4378                         }
4379                 } }
4380                 match order {
4381                         RAACommitmentOrder::CommitmentFirst => {
4382                                 handle_cs!();
4383                                 handle_raa!();
4384                         },
4385                         RAACommitmentOrder::RevokeAndACKFirst => {
4386                                 handle_raa!();
4387                                 handle_cs!();
4388                         },
4389                 }
4390
4391                 if let Some(tx) = funding_broadcastable {
4392                         log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
4393                         self.tx_broadcaster.broadcast_transaction(&tx);
4394                 }
4395
4396                 {
4397                         let mut pending_events = self.pending_events.lock().unwrap();
4398                         emit_channel_pending_event!(pending_events, channel);
4399                         emit_channel_ready_event!(pending_events, channel);
4400                 }
4401
4402                 htlc_forwards
4403         }
4404
4405         fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) {
4406                 debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
4407
4408                 let counterparty_node_id = match counterparty_node_id {
4409                         Some(cp_id) => cp_id.clone(),
4410                         None => {
4411                                 // TODO: Once we can rely on the counterparty_node_id from the
4412                                 // monitor event, this and the id_to_peer map should be removed.
4413                                 let id_to_peer = self.id_to_peer.lock().unwrap();
4414                                 match id_to_peer.get(&funding_txo.to_channel_id()) {
4415                                         Some(cp_id) => cp_id.clone(),
4416                                         None => return,
4417                                 }
4418                         }
4419                 };
4420                 let per_peer_state = self.per_peer_state.read().unwrap();
4421                 let mut peer_state_lock;
4422                 let peer_state_mutex_opt = per_peer_state.get(&counterparty_node_id);
4423                 if peer_state_mutex_opt.is_none() { return }
4424                 peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
4425                 let peer_state = &mut *peer_state_lock;
4426                 let mut channel = {
4427                         match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) {
4428                                 hash_map::Entry::Occupied(chan) => chan,
4429                                 hash_map::Entry::Vacant(_) => return,
4430                         }
4431                 };
4432                 log_trace!(self.logger, "ChannelMonitor updated to {}. Current highest is {}",
4433                         highest_applied_update_id, channel.get().get_latest_monitor_update_id());
4434                 if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
4435                         return;
4436                 }
4437                 handle_monitor_update_completion!(self, highest_applied_update_id, peer_state_lock, peer_state, per_peer_state, channel.get_mut());
4438         }
4439
4440         /// Accepts a request to open a channel after a [`Event::OpenChannelRequest`].
4441         ///
4442         /// The `temporary_channel_id` parameter indicates which inbound channel should be accepted,
4443         /// and the `counterparty_node_id` parameter is the id of the peer which has requested to open
4444         /// the channel.
4445         ///
4446         /// The `user_channel_id` parameter will be provided back in
4447         /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
4448         /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
4449         ///
4450         /// Note that this method will return an error and reject the channel if it requires support
4451         /// for zero confirmations. Instead, [`Self::accept_inbound_channel_from_trusted_peer_0conf`]
4452         /// must be used to accept such channels.
4453         ///
4454         /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
4455         /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
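             ///
             /// # Example
             ///
             /// A minimal sketch of manually accepting channels from an event handler. This assumes
             /// `channel_manager` is an initialized [`ChannelManager`] whose config has
             /// `manually_accept_inbound_channels` set:
             ///
             /// ```ignore
             /// use lightning::events::Event;
             ///
             /// match event {
             ///     Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } => {
             ///         // Any u128 works; it is echoed back in ChannelClosed events for tracking.
             ///         let user_channel_id = 42;
             ///         if let Err(_e) = channel_manager.accept_inbound_channel(
             ///             &temporary_channel_id, &counterparty_node_id, user_channel_id)
             ///         {
             ///             // The channel was rejected, e.g. because it required zero-conf support.
             ///         }
             ///     },
             ///     _ => {},
             /// }
             /// ```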
4456         pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
4457                 self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, false, user_channel_id)
4458         }
4459
4460         /// Accepts a request to open a channel after a [`events::Event::OpenChannelRequest`], treating
4461         /// it as confirmed immediately.
4462         ///
4463         /// The `user_channel_id` parameter will be provided back in
4464         /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
4465         /// with which `accept_inbound_channel`/`accept_inbound_channel_from_trusted_peer_0conf` call.
4466         ///
4467         /// Unlike [`ChannelManager::accept_inbound_channel`], this method accepts the incoming channel
4468         /// and (if the counterparty agrees), enables forwarding of payments immediately.
4469         ///
4470         /// This fully trusts that the counterparty has honestly and correctly constructed the funding
4471         /// transaction and blindly assumes that it will eventually confirm.
4472         ///
4473         /// If it does not confirm before we decide to close the channel, or if the funding transaction
4474         /// does not pay the correct amount to the correct script, *you will lose funds*.
4475         ///
4476         /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
4477         /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
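             ///
             /// For example, if the peer is one you run yourself or otherwise fully trust (a minimal
             /// sketch; `trusted_peers` is your own whitelist):
             ///
             /// ```ignore
             /// if trusted_peers.contains(&counterparty_node_id) {
             ///     channel_manager.accept_inbound_channel_from_trusted_peer_0conf(
             ///         &temporary_channel_id, &counterparty_node_id, 42).unwrap();
             /// }
             /// ```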
4478         pub fn accept_inbound_channel_from_trusted_peer_0conf(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u128) -> Result<(), APIError> {
4479                 self.do_accept_inbound_channel(temporary_channel_id, counterparty_node_id, true, user_channel_id)
4480         }
4481
4482         fn do_accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, accept_0conf: bool, user_channel_id: u128) -> Result<(), APIError> {
4483                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
4484
4485                 let peers_without_funded_channels = self.peers_without_funded_channels(|peer| !peer.channel_by_id.is_empty());
4486                 let per_peer_state = self.per_peer_state.read().unwrap();
4487                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
4488                         .ok_or_else(|| APIError::ChannelUnavailable { err: format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id) })?;
4489                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4490                 let peer_state = &mut *peer_state_lock;
4491                 let is_only_peer_channel = peer_state.channel_by_id.len() == 1;
4492                 match peer_state.channel_by_id.entry(temporary_channel_id.clone()) {
4493                         hash_map::Entry::Occupied(mut channel) => {
4494                                 if !channel.get().inbound_is_awaiting_accept() {
4495                                         return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
4496                                 }
4497                                 if accept_0conf {
4498                                         channel.get_mut().set_0conf();
4499                                 } else if channel.get().get_channel_type().requires_zero_conf() {
4500                                         let send_msg_err_event = events::MessageSendEvent::HandleError {
4501                                                 node_id: channel.get().get_counterparty_node_id(),
4502                                                 action: msgs::ErrorAction::SendErrorMessage{
4503                                                         msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "No zero confirmation channels accepted".to_owned(), }
4504                                                 }
4505                                         };
4506                                         peer_state.pending_msg_events.push(send_msg_err_event);
4507                                         let _ = remove_channel!(self, channel);
4508                                         return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
4509                                 } else {
4510                                         // If this peer already has some channels, a new channel won't increase our number of peers
4511                                         // with unfunded channels, so as long as we aren't over the maximum number of unfunded
4512                                         // channels per-peer we can accept channels from a peer with existing ones.
4513                                         if is_only_peer_channel && peers_without_funded_channels >= MAX_UNFUNDED_CHANNEL_PEERS {
4514                                                 let send_msg_err_event = events::MessageSendEvent::HandleError {
4515                                                         node_id: channel.get().get_counterparty_node_id(),
4516                                                         action: msgs::ErrorAction::SendErrorMessage{
4517                                                                 msg: msgs::ErrorMessage { channel_id: temporary_channel_id.clone(), data: "Have too many peers with unfunded channels, not accepting new ones".to_owned(), }
4518                                                         }
4519                                                 };
4520                                                 peer_state.pending_msg_events.push(send_msg_err_event);
4521                                                 let _ = remove_channel!(self, channel);
4522                                                 return Err(APIError::APIMisuseError { err: "Too many peers with unfunded channels, refusing to accept new ones".to_owned() });
4523                                         }
4524                                 }
4525
4526                                 peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
4527                                         node_id: channel.get().get_counterparty_node_id(),
4528                                         msg: channel.get_mut().accept_inbound_channel(user_channel_id),
4529                                 });
4530                         }
4531                         hash_map::Entry::Vacant(_) => {
4532                                 return Err(APIError::ChannelUnavailable { err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*temporary_channel_id), counterparty_node_id) });
4533                         }
4534                 }
4535                 Ok(())
4536         }
4537
4538         /// Gets the number of peers which match the given filter and do not have any funded, outbound,
4539         /// or 0-conf channels.
4540         ///
4541         /// The filter is called for each peer and is provided with the peer's state; only peers
4542         /// for which it returns `true` are counted.
4543         fn peers_without_funded_channels<Filter>(&self, maybe_count_peer: Filter) -> usize
4544         where Filter: Fn(&PeerState<<SP::Target as SignerProvider>::Signer>) -> bool {
4545                 let mut peers_without_funded_channels = 0;
4546                 let best_block_height = self.best_block.read().unwrap().height();
4547                 {
4548                         let peer_state_lock = self.per_peer_state.read().unwrap();
4549                         for (_, peer_mtx) in peer_state_lock.iter() {
4550                                 let peer = peer_mtx.lock().unwrap();
4551                                 if !maybe_count_peer(&*peer) { continue; }
4552                                 let num_unfunded_channels = Self::unfunded_channel_count(&peer, best_block_height);
4553                                 if num_unfunded_channels == peer.channel_by_id.len() {
4554                                         peers_without_funded_channels += 1;
4555                                 }
4556                         }
4557                 }
4558                 peers_without_funded_channels
4559         }
4560
4561         fn unfunded_channel_count(
4562                 peer: &PeerState<<SP::Target as SignerProvider>::Signer>, best_block_height: u32
4563         ) -> usize {
4564                 let mut num_unfunded_channels = 0;
4565                 for (_, chan) in peer.channel_by_id.iter() {
4566                         if !chan.is_outbound() && chan.minimum_depth().unwrap_or(1) != 0 &&
4567                                 chan.get_funding_tx_confirmations(best_block_height) == 0
4568                         {
4569                                 num_unfunded_channels += 1;
4570                         }
4571                 }
4572                 num_unfunded_channels
4573         }
4574
4575         fn internal_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
4576                 if msg.chain_hash != self.genesis_hash {
4577                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
4578                 }
4579
4580                 if !self.default_configuration.accept_inbound_channels {
4581                         return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
4582                 }
4583
4584                 let mut random_bytes = [0u8; 16];
4585                 random_bytes.copy_from_slice(&self.entropy_source.get_secure_random_bytes()[..16]);
4586                 let user_channel_id = u128::from_be_bytes(random_bytes);
4587                 let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
4588
4589                 // Get the number of peers with channels, but without funded ones. We don't care too much
4590                 // about peers that never open a channel, so we filter by peers that have at least one
4591                 // channel, and then limit the number of those with unfunded channels.
4592                 let channeled_peers_without_funding = self.peers_without_funded_channels(|node| !node.channel_by_id.is_empty());
4593
4594                 let per_peer_state = self.per_peer_state.read().unwrap();
4595                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
4596                         .ok_or_else(|| {
4597                                 debug_assert!(false);
4598                                 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id.clone())
4599                         })?;
4600                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4601                 let peer_state = &mut *peer_state_lock;
4602
4603                 // If this peer already has some channels, a new channel won't increase our number of peers
4604                 // with unfunded channels, so as long as we aren't over the maximum number of unfunded
4605                 // channels per-peer we can accept channels from a peer with existing ones.
4606                 if peer_state.channel_by_id.is_empty() &&
4607                         channeled_peers_without_funding >= MAX_UNFUNDED_CHANNEL_PEERS &&
4608                         !self.default_configuration.manually_accept_inbound_channels
4609                 {
4610                         return Err(MsgHandleErrInternal::send_err_msg_no_close(
4611                                 "Have too many peers with unfunded channels, not accepting new ones".to_owned(),
4612                                 msg.temporary_channel_id.clone()));
4613                 }
4614
4615                 let best_block_height = self.best_block.read().unwrap().height();
4616                 if Self::unfunded_channel_count(peer_state, best_block_height) >= MAX_UNFUNDED_CHANS_PER_PEER {
4617                         return Err(MsgHandleErrInternal::send_err_msg_no_close(
4618                                 format!("Refusing more than {} unfunded channels.", MAX_UNFUNDED_CHANS_PER_PEER),
4619                                 msg.temporary_channel_id.clone()));
4620                 }
4621
4622                 let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.entropy_source, &self.signer_provider,
4623                         counterparty_node_id.clone(), &self.channel_type_features(), &peer_state.latest_features, msg, user_channel_id,
4624                         &self.default_configuration, best_block_height, &self.logger, outbound_scid_alias)
4625                 {
4626                         Err(e) => {
4627                                 self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
4628                                 return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id));
4629                         },
4630                         Ok(res) => res
4631                 };
4632                 match peer_state.channel_by_id.entry(channel.channel_id()) {
4633                         hash_map::Entry::Occupied(_) => {
4634                                 self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
4635                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision for the same peer!".to_owned(), msg.temporary_channel_id.clone()))
4636                         },
4637                         hash_map::Entry::Vacant(entry) => {
4638                                 if !self.default_configuration.manually_accept_inbound_channels {
4639                                         if channel.get_channel_type().requires_zero_conf() {
4640                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("No zero confirmation channels accepted".to_owned(), msg.temporary_channel_id.clone()));
4641                                         }
4642                                         peer_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
4643                                                 node_id: counterparty_node_id.clone(),
4644                                                 msg: channel.accept_inbound_channel(user_channel_id),
4645                                         });
4646                                 } else {
4647                                         let mut pending_events = self.pending_events.lock().unwrap();
4648                                         pending_events.push(
4649                                                 events::Event::OpenChannelRequest {
4650                                                         temporary_channel_id: msg.temporary_channel_id.clone(),
4651                                                         counterparty_node_id: counterparty_node_id.clone(),
4652                                                         funding_satoshis: msg.funding_satoshis,
4653                                                         push_msat: msg.push_msat,
4654                                                         channel_type: channel.get_channel_type().clone(),
4655                                                 }
4656                                         );
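					// Below, the channel is stored in `channel_by_id` and sits unaccepted until
					// the user reacts to the `OpenChannelRequest` event. A minimal sketch of that
					// handling (assuming a `channel_manager` in scope; `user_channel_id` is any
					// value of the user's choosing):
					//
					//     Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. } =>
					//         channel_manager.accept_inbound_channel(
					//             &temporary_channel_id, &counterparty_node_id, user_channel_id)?,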
4657                                 }
4658
4659                                 entry.insert(channel);
4660                         }
4661                 }
4662                 Ok(())
4663         }
4664
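	/// Handles an `accept_channel` message from the counterparty of a channel we initiated,
	/// surfacing an [`Event::FundingGenerationReady`] so the user can construct a funding
	/// transaction paying to the returned output script.
	///
	/// A minimal sketch of the expected user-side handling, assuming a `channel_manager` in
	/// scope and a hypothetical `build_funding_tx` helper:
	///
	/// ```ignore
	/// Event::FundingGenerationReady { temporary_channel_id, counterparty_node_id, channel_value_satoshis, output_script, .. } => {
	///     let funding_tx = build_funding_tx(&output_script, channel_value_satoshis);
	///     channel_manager.funding_transaction_generated(&temporary_channel_id, &counterparty_node_id, funding_tx)?;
	/// }
	/// ```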
4665         fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
4666                 let (value, output_script, user_id) = {
4667                         let per_peer_state = self.per_peer_state.read().unwrap();
4668                         let peer_state_mutex = per_peer_state.get(counterparty_node_id)
4669                                 .ok_or_else(|| {
4670                                         debug_assert!(false);
4671                                         MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
4672                                 })?;
4673                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4674                         let peer_state = &mut *peer_state_lock;
4675                         match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
4676                                 hash_map::Entry::Occupied(mut chan) => {
4677                                         try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.channel_handshake_limits, &peer_state.latest_features), chan);
4678                                         (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
4679                                 },
4680                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
4681                         }
4682                 };
4683                 let mut pending_events = self.pending_events.lock().unwrap();
4684                 pending_events.push(events::Event::FundingGenerationReady {
4685                         temporary_channel_id: msg.temporary_channel_id,
4686                         counterparty_node_id: *counterparty_node_id,
4687                         channel_value_satoshis: value,
4688                         output_script,
4689                         user_channel_id: user_id,
4690                 });
4691                 Ok(())
4692         }
4693
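	/// Handles a `funding_created` message for an inbound channel: derives the real channel id
	/// from the funding outpoint, re-keys the channel under it, hands the freshly-created
	/// [`ChannelMonitor`] to the [`chain::Watch`], and queues our `funding_signed` reply.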
4694         fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
4695                 let best_block = *self.best_block.read().unwrap();
4696
4697                 let per_peer_state = self.per_peer_state.read().unwrap();
4698                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
4699                         .ok_or_else(|| {
4700                                 debug_assert!(false);
4701                                 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.temporary_channel_id)
4702                         })?;
4703
4704                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4705                 let peer_state = &mut *peer_state_lock;
4706                 let ((funding_msg, monitor), chan) =
4707                         match peer_state.channel_by_id.entry(msg.temporary_channel_id) {
4708                                 hash_map::Entry::Occupied(mut chan) => {
4709                                         (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.signer_provider, &self.logger), chan), chan.remove())
4710                                 },
4711                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
4712                         };
4713
4714                 match peer_state.channel_by_id.entry(funding_msg.channel_id) {
4715                         hash_map::Entry::Occupied(_) => {
4716                                 Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
4717                         },
4718                         hash_map::Entry::Vacant(e) => {
4719                                 match self.id_to_peer.lock().unwrap().entry(chan.channel_id()) {
4720                                         hash_map::Entry::Occupied(_) => {
4721                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close(
4722                                                         "The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
4723                                                         funding_msg.channel_id))
4724                                         },
4725                                         hash_map::Entry::Vacant(i_e) => {
4726                                                 i_e.insert(chan.get_counterparty_node_id());
4727                                         }
4728                                 }
4729
				// There's no problem signing the counterparty's initial commitment transaction
				// (i.e. sending `funding_signed`) before our monitor has been persisted to disk -
				// we can't lose money on a channel we haven't yet accepted any payments on. We
				// do, however, need to wait to send our `channel_ready` until our monitor has
				// been persisted.
4734                                 let new_channel_id = funding_msg.channel_id;
4735                                 peer_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
4736                                         node_id: counterparty_node_id.clone(),
4737                                         msg: funding_msg,
4738                                 });
4739
4740                                 let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
4741
4742                                 let chan = e.insert(chan);
4743                                 let mut res = handle_new_monitor_update!(self, monitor_res, 0, peer_state_lock, peer_state,
4744                                         per_peer_state, chan, MANUALLY_REMOVING, { peer_state.channel_by_id.remove(&new_channel_id) });
4745
4746                                 // Note that we reply with the new channel_id in error messages if we gave up on the
4747                                 // channel, not the temporary_channel_id. This is compatible with ourselves, but the
4748                                 // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
4749                                 // any messages referencing a previously-closed channel anyway.
4750                                 // We do not propagate the monitor update to the user as it would be for a monitor
4751                                 // that we didn't manage to store (and that we don't care about - we don't respond
4752                                 // with the funding_signed so the channel can never go on chain).
4753                                 if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
4754                                         res.0 = None;
4755                                 }
4756                                 res
4757                         }
4758                 }
4759         }
4760
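	/// Handles a `funding_signed` message for a channel we initiated: validates the
	/// counterparty's signature on our initial commitment transaction and registers the new
	/// [`ChannelMonitor`] with the [`chain::Watch`].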
4761         fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
4762                 let best_block = *self.best_block.read().unwrap();
4763                 let per_peer_state = self.per_peer_state.read().unwrap();
4764                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
4765                         .ok_or_else(|| {
4766                                 debug_assert!(false);
4767                                 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
4768                         })?;
4769
4770                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4771                 let peer_state = &mut *peer_state_lock;
4772                 match peer_state.channel_by_id.entry(msg.channel_id) {
4773                         hash_map::Entry::Occupied(mut chan) => {
4774                                 let monitor = try_chan_entry!(self,
4775                                         chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan);
4776                                 let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
4777                                 let mut res = handle_new_monitor_update!(self, update_res, 0, peer_state_lock, peer_state, per_peer_state, chan);
4778                                 if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
4779                                         // We weren't able to watch the channel to begin with, so no updates should be made on
4780                                         // it. Previously, full_stack_target found an (unreachable) panic when the
4781                                         // monitor update contained within `shutdown_finish` was applied.
4782                                         if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
4783                                                 shutdown_finish.0.take();
4784                                         }
4785                                 }
4786                                 res
4787                         },
4788                         hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4789                 }
4790         }
4791
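	/// Handles a counterparty `channel_ready` message, queueing `announcement_signatures` if the
	/// channel is to be announced or an initial unicast `channel_update` if it is private, and
	/// emitting a channel-ready event for the user.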
4792         fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
4793                 let per_peer_state = self.per_peer_state.read().unwrap();
4794                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
4795                         .ok_or_else(|| {
4796                                 debug_assert!(false);
4797                                 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
4798                         })?;
4799                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4800                 let peer_state = &mut *peer_state_lock;
4801                 match peer_state.channel_by_id.entry(msg.channel_id) {
4802                         hash_map::Entry::Occupied(mut chan) => {
4803                                 let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().channel_ready(&msg, &self.node_signer,
4804                                         self.genesis_hash.clone(), &self.default_configuration, &self.best_block.read().unwrap(), &self.logger), chan);
4805                                 if let Some(announcement_sigs) = announcement_sigs_opt {
4806                                         log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
4807                                         peer_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
4808                                                 node_id: counterparty_node_id.clone(),
4809                                                 msg: announcement_sigs,
4810                                         });
4811                                 } else if chan.get().is_usable() {
4812                                         // If we're sending an announcement_signatures, we'll send the (public)
4813                                         // channel_update after sending a channel_announcement when we receive our
4814                                         // counterparty's announcement_signatures. Thus, we only bother to send a
4815                                         // channel_update here if the channel is not public, i.e. we're not sending an
4816                                         // announcement_signatures.
4817                                         log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
4818                                         if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
4819                                                 peer_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
4820                                                         node_id: counterparty_node_id.clone(),
4821                                                         msg,
4822                                                 });
4823                                         }
4824                                 }
4825
4826                                 {
4827                                         let mut pending_events = self.pending_events.lock().unwrap();
4828                                         emit_channel_ready_event!(pending_events, chan.get_mut());
4829                                 }
4830
4831                                 Ok(())
4832                         },
4833                         hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
4834                 }
4835         }
4836
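	/// Handles a counterparty `shutdown` message, queueing our own `shutdown` reply if needed,
	/// persisting the counterparty's shutdown script via a monitor update if one is required,
	/// and failing back any HTLCs dropped from the holding cell.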
4837         fn internal_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
4838                 let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
4839                 let result: Result<(), _> = loop {
4840                         let per_peer_state = self.per_peer_state.read().unwrap();
4841                         let peer_state_mutex = per_peer_state.get(counterparty_node_id)
4842                                 .ok_or_else(|| {
4843                                         debug_assert!(false);
4844                                         MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
4845                                 })?;
4846                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4847                         let peer_state = &mut *peer_state_lock;
4848                         match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
4849                                 hash_map::Entry::Occupied(mut chan_entry) => {
4850
4851                                         if !chan_entry.get().received_shutdown() {
4852                                                 log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
4853                                                         log_bytes!(msg.channel_id),
4854                                                         if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
4855                                         }
4856
4857                                         let funding_txo_opt = chan_entry.get().get_funding_txo();
4858                                         let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
4859                                                 chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
4860                                         dropped_htlcs = htlcs;
4861
4862                                         if let Some(msg) = shutdown {
						// We can send the `shutdown` message before updating the `ChannelMonitor`
						// here as we don't need the monitor update to complete until we send a
						// `closing_signed`, which we'll delay if we're pending a monitor update.
4866                                                 peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
4867                                                         node_id: *counterparty_node_id,
4868                                                         msg,
4869                                                 });
4870                                         }
4871
4872                                         // Update the monitor with the shutdown script if necessary.
4873                                         if let Some(monitor_update) = monitor_update_opt {
4874                                                 let update_id = monitor_update.update_id;
4875                                                 let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
4876                                                 break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, per_peer_state, chan_entry);
4877                                         }
4878                                         break Ok(());
4879                                 },
4880                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
4881                         }
4882                 };
4883                 for htlc_source in dropped_htlcs.drain(..) {
4884                         let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id.clone()), channel_id: msg.channel_id };
			// 0x4000 | 8 == `permanent_channel_failure` per BOLT 4.
			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
4886                         self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
4887                 }
4888
4889                 result
4890         }
4891
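	/// Handles a counterparty `closing_signed` message, continuing the closing-fee negotiation.
	/// Once both sides agree on a fee we broadcast the co-signed closing transaction and drop
	/// the channel from tracking (its [`ChannelMonitor`] remains, watching for stale-state
	/// broadcasts).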
4892         fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
4893                 let per_peer_state = self.per_peer_state.read().unwrap();
4894                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
4895                         .ok_or_else(|| {
4896                                 debug_assert!(false);
4897                                 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
4898                         })?;
4899                 let (tx, chan_option) = {
4900                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4901                         let peer_state = &mut *peer_state_lock;
4902                         match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
4903                                 hash_map::Entry::Occupied(mut chan_entry) => {
4904                                         let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), chan_entry);
4905                                         if let Some(msg) = closing_signed {
4906                                                 peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
4907                                                         node_id: counterparty_node_id.clone(),
4908                                                         msg,
4909                                                 });
4910                                         }
4911                                         if tx.is_some() {
4912                                                 // We're done with this channel, we've got a signed closing transaction and
4913                                                 // will send the closing_signed back to the remote peer upon return. This
4914                                                 // also implies there are no pending HTLCs left on the channel, so we can
4915                                                 // fully delete it from tracking (the channel monitor is still around to
4916                                                 // watch for old state broadcasts)!
4917                                                 (tx, Some(remove_channel!(self, chan_entry)))
4918                                         } else { (tx, None) }
4919                                 },
4920                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
4921                         }
4922                 };
4923                 if let Some(broadcast_tx) = tx {
4924                         log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
4925                         self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
4926                 }
4927                 if let Some(chan) = chan_option {
4928                         if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
4929                                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4930                                 let peer_state = &mut *peer_state_lock;
4931                                 peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
4932                                         msg: update
4933                                 });
4934                         }
4935                         self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure);
4936                 }
4937                 Ok(())
4938         }
4939
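	/// Handles a counterparty `update_add_htlc` message, decoding the onion to build the pending
	/// forward/receive info and staging the HTLC on the channel.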
4940         fn internal_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
		//TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
		//determine the state of the payment based on our response - whether we forward anything
		//and how long we take to respond. We should take care to avoid allowing such an attack.
4944                 //
4945                 //TODO: There exists a further attack where a node may garble the onion data, forward it to
4946                 //us repeatedly garbled in different ways, and compare our error messages, which are
4947                 //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
4948                 //but we should prevent it anyway.
4949
4950                 let pending_forward_info = self.decode_update_add_htlc_onion(msg);
4951                 let per_peer_state = self.per_peer_state.read().unwrap();
4952                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
4953                         .ok_or_else(|| {
4954                                 debug_assert!(false);
4955                                 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
4956                         })?;
4957                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
4958                 let peer_state = &mut *peer_state_lock;
4959                 match peer_state.channel_by_id.entry(msg.channel_id) {
4960                         hash_map::Entry::Occupied(mut chan) => {
4961
4962                                 let create_pending_htlc_status = |chan: &Channel<<SP::Target as SignerProvider>::Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
4963                                         // If the update_add is completely bogus, the call will Err and we will close,
4964                                         // but if we've sent a shutdown and they haven't acknowledged it yet, we just
4965                                         // want to reject the new HTLC and fail it backwards instead of forwarding.
4966                                         match pending_forward_info {
4967                                                 PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
4968                                                         let reason = if (error_code & 0x1000) != 0 {
4969                                                                 let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
4970                                                                 HTLCFailReason::reason(real_code, error_data)
4971                                                         } else {
4972                                                                 HTLCFailReason::from_failure_code(error_code)
4973                                                         }.get_encrypted_failure_packet(incoming_shared_secret, &None);
4974                                                         let msg = msgs::UpdateFailHTLC {
4975                                                                 channel_id: msg.channel_id,
4976                                                                 htlc_id: msg.htlc_id,
4977                                                                 reason
4978                                                         };
4979                                                         PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
4980                                                 },
4981                                                 _ => pending_forward_info
4982                                         }
4983                                 };
4984                                 try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), chan);
4985                         },
4986                         hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
4987                 }
4988                 Ok(())
4989         }
4990
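	/// Handles a counterparty `update_fulfill_htlc` message, claiming the funds backwards - to
	/// the previous hop if we forwarded the HTLC, or to our pending outbound payment if we sent
	/// it.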
4991         fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
4992                 let (htlc_source, forwarded_htlc_value) = {
4993                         let per_peer_state = self.per_peer_state.read().unwrap();
4994                         let peer_state_mutex = per_peer_state.get(counterparty_node_id)
4995                                 .ok_or_else(|| {
4996                                         debug_assert!(false);
4997                                         MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
4998                                 })?;
4999                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5000                         let peer_state = &mut *peer_state_lock;
5001                         match peer_state.channel_by_id.entry(msg.channel_id) {
5002                                 hash_map::Entry::Occupied(mut chan) => {
5003                                         try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan)
5004                                 },
5005                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
5006                         }
5007                 };
5008                 self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
5009                 Ok(())
5010         }
5011
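	/// Handles a counterparty `update_fail_htlc` message, recording the failure reason against
	/// the HTLC on the channel so it can be failed backwards once fully committed.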
5012         fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
5013                 let per_peer_state = self.per_peer_state.read().unwrap();
5014                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
5015                         .ok_or_else(|| {
5016                                 debug_assert!(false);
5017                                 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
5018                         })?;
5019                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5020                 let peer_state = &mut *peer_state_lock;
5021                 match peer_state.channel_by_id.entry(msg.channel_id) {
5022                         hash_map::Entry::Occupied(mut chan) => {
5023                                 try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::from_msg(msg)), chan);
5024                         },
5025                         hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
5026                 }
5027                 Ok(())
5028         }
5029
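	/// Handles a counterparty `update_fail_malformed_htlc` message. Per BOLT 4, the failure code
	/// must have the BADONION bit (0x8000) set; if not, we close the channel.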
5030         fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
5031                 let per_peer_state = self.per_peer_state.read().unwrap();
5032                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
5033                         .ok_or_else(|| {
5034                                 debug_assert!(false);
5035                                 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
5036                         })?;
5037                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5038                 let peer_state = &mut *peer_state_lock;
5039                 match peer_state.channel_by_id.entry(msg.channel_id) {
5040                         hash_map::Entry::Occupied(mut chan) => {
				if (msg.failure_code & 0x8000) == 0 {
					// BOLT 4 requires the BADONION bit (0x8000) to be set in any
					// update_fail_malformed_htlc failure code.
					let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
					try_chan_entry!(self, Err(chan_err), chan);
				}
5045                                 try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::reason(msg.failure_code, msg.sha256_of_onion.to_vec())), chan);
5046                                 Ok(())
5047                         },
5048                         hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
5049                 }
5050         }
5051
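	/// Handles a counterparty `commitment_signed` message, validating it and persisting the new
	/// commitment state via a [`ChannelMonitorUpdate`]; our `revoke_and_ack` reply is released
	/// once that monitor update completes.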
5052         fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
5053                 let per_peer_state = self.per_peer_state.read().unwrap();
5054                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
5055                         .ok_or_else(|| {
5056                                 debug_assert!(false);
5057                                 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
5058                         })?;
5059                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5060                 let peer_state = &mut *peer_state_lock;
5061                 match peer_state.channel_by_id.entry(msg.channel_id) {
5062                         hash_map::Entry::Occupied(mut chan) => {
5063                                 let funding_txo = chan.get().get_funding_txo();
5064                                 let monitor_update = try_chan_entry!(self, chan.get_mut().commitment_signed(&msg, &self.logger), chan);
5065                                 let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
5066                                 let update_id = monitor_update.update_id;
5067                                 handle_new_monitor_update!(self, update_res, update_id, peer_state_lock,
5068                                         peer_state, per_peer_state, chan)
5069                         },
5070                         hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
5071                 }
5072         }
5073
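	/// Queues up pending HTLC forwards, keyed by the outgoing short channel id. HTLCs addressed
	/// to a valid intercept scid are instead surfaced as [`Event::HTLCIntercepted`] and parked in
	/// `pending_intercepted_htlcs` until the user acts on them. A minimal sketch of that
	/// user-side handling, assuming a `channel_manager` in scope (`next_hop_channel_id` and
	/// `next_node_pubkey` are the user's routing decision):
	///
	/// ```ignore
	/// Event::HTLCIntercepted { intercept_id, expected_outbound_amount_msat, .. } => {
	///     channel_manager.forward_intercepted_htlc(intercept_id, &next_hop_channel_id,
	///         next_node_pubkey, expected_outbound_amount_msat)?;
	///     // or, to reject it: channel_manager.fail_intercepted_htlc(intercept_id)?;
	/// }
	/// ```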
5074         #[inline]
5075         fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) {
5076                 for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards {
5077                         let mut push_forward_event = false;
5078                         let mut new_intercept_events = Vec::new();
5079                         let mut failed_intercept_forwards = Vec::new();
5080                         if !pending_forwards.is_empty() {
5081                                 for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
5082                                         let scid = match forward_info.routing {
5083                                                 PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
5084                                                 PendingHTLCRouting::Receive { .. } => 0,
5085                                                 PendingHTLCRouting::ReceiveKeysend { .. } => 0,
5086                                         };
					// Look this up now so that we don't take the `short_to_chan_info` lock while
					// also holding `forward_htlcs`, avoiding a new lock-order dependency.
5088                                         let is_our_scid = self.short_to_chan_info.read().unwrap().contains_key(&scid);
5089
5090                                         let mut forward_htlcs = self.forward_htlcs.lock().unwrap();
5091                                         let forward_htlcs_empty = forward_htlcs.is_empty();
5092                                         match forward_htlcs.entry(scid) {
5093                                                 hash_map::Entry::Occupied(mut entry) => {
5094                                                         entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
5095                                                                 prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }));
5096                                                 },
5097                                                 hash_map::Entry::Vacant(entry) => {
5098                                                         if !is_our_scid && forward_info.incoming_amt_msat.is_some() &&
5099                                                            fake_scid::is_valid_intercept(&self.fake_scid_rand_bytes, scid, &self.genesis_hash)
5100                                                         {
5101                                                                 let intercept_id = InterceptId(Sha256::hash(&forward_info.incoming_shared_secret).into_inner());
5102                                                                 let mut pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
5103                                                                 match pending_intercepts.entry(intercept_id) {
5104                                                                         hash_map::Entry::Vacant(entry) => {
5105                                                                                 new_intercept_events.push(events::Event::HTLCIntercepted {
5106                                                                                         requested_next_hop_scid: scid,
5107                                                                                         payment_hash: forward_info.payment_hash,
5108                                                                                         inbound_amount_msat: forward_info.incoming_amt_msat.unwrap(),
5109                                                                                         expected_outbound_amount_msat: forward_info.outgoing_amt_msat,
5110                                                                                         intercept_id
5111                                                                                 });
5112                                                                                 entry.insert(PendingAddHTLCInfo {
5113                                                                                         prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info });
5114                                                                         },
5115                                                                         hash_map::Entry::Occupied(_) => {
5116                                                                                 log_info!(self.logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid);
5117                                                                                 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
5118                                                                                         short_channel_id: prev_short_channel_id,
5119                                                                                         outpoint: prev_funding_outpoint,
5120                                                                                         htlc_id: prev_htlc_id,
5121                                                                                         incoming_packet_shared_secret: forward_info.incoming_shared_secret,
5122                                                                                         phantom_shared_secret: None,
5123                                                                                 });
5124
												failed_intercept_forwards.push((htlc_source, forward_info.payment_hash,
													// 0x4000 | 10 == `unknown_next_peer` per BOLT 4.
													HTLCFailReason::from_failure_code(0x4000 | 10),
													HTLCDestination::InvalidForward { requested_forward_scid: scid },
												));
5129                                                                         }
5130                                                                 }
5131                                                         } else {
5132                                                                 // We don't want to generate a PendingHTLCsForwardable event if only intercepted
5133                                                                 // payments are being processed.
5134                                                                 if forward_htlcs_empty {
5135                                                                         push_forward_event = true;
5136                                                                 }
5137                                                                 entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo {
5138                                                                         prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })));
5139                                                         }
5140                                                 }
5141                                         }
5142                                 }
5143                         }
5144
5145                         for (htlc_source, payment_hash, failure_reason, destination) in failed_intercept_forwards.drain(..) {
5146                                 self.fail_htlc_backwards_internal(&htlc_source, &payment_hash, &failure_reason, destination);
5147                         }
5148
5149                         if !new_intercept_events.is_empty() {
5150                                 let mut events = self.pending_events.lock().unwrap();
5151                                 events.append(&mut new_intercept_events);
5152                         }
5153                         if push_forward_event { self.push_pending_forwards_ev() }
5154                 }
5155         }
5156
5157         // We only want to push a PendingHTLCsForwardable event if no others are queued.
5158         fn push_pending_forwards_ev(&self) {
5159                 let mut pending_events = self.pending_events.lock().unwrap();
		let forward_ev_exists = pending_events.iter()
			.any(|ev| matches!(ev, events::Event::PendingHTLCsForwardable { .. }));
		if !forward_ev_exists {
			pending_events.push(events::Event::PendingHTLCsForwardable {
				time_forwardable: Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS),
5167                         });
5168                 }
5169         }
5170
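	/// Handles a counterparty `revoke_and_ack` message, persisting the revocation via a
	/// [`ChannelMonitorUpdate`] and failing back any HTLCs freed from the holding cell.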
5171         fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
5172                 let (htlcs_to_fail, res) = {
5173                         let per_peer_state = self.per_peer_state.read().unwrap();
5174                         let mut peer_state_lock = per_peer_state.get(counterparty_node_id)
5175                                 .ok_or_else(|| {
5176                                         debug_assert!(false);
5177                                         MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
5178                                 }).map(|mtx| mtx.lock().unwrap())?;
5179                         let peer_state = &mut *peer_state_lock;
5180                         match peer_state.channel_by_id.entry(msg.channel_id) {
5181                                 hash_map::Entry::Occupied(mut chan) => {
5182                                         let funding_txo = chan.get().get_funding_txo();
5183                                         let (htlcs_to_fail, monitor_update) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
5184                                         let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
5185                                         let update_id = monitor_update.update_id;
5186                                         let res = handle_new_monitor_update!(self, update_res, update_id,
5187                                                 peer_state_lock, peer_state, per_peer_state, chan);
5188                                         (htlcs_to_fail, res)
5189                                 },
5190                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
5191                         }
5192                 };
5193                 self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id, counterparty_node_id);
5194                 res
5195         }
5196
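	/// Handles a counterparty `update_fee` message, sanity-checking the new feerate against our
	/// [`FeeEstimator`] before staging it on the channel.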
5197         fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
5198                 let per_peer_state = self.per_peer_state.read().unwrap();
5199                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
5200                         .ok_or_else(|| {
5201                                 debug_assert!(false);
5202                                 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
5203                         })?;
5204                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5205                 let peer_state = &mut *peer_state_lock;
5206                 match peer_state.channel_by_id.entry(msg.channel_id) {
5207                         hash_map::Entry::Occupied(mut chan) => {
5208                                 try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg, &self.logger), chan);
5209                         },
5210                         hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
5211                 }
5212                 Ok(())
5213         }
5214
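	/// Handles a counterparty `announcement_signatures` message, queueing a broadcast of the
	/// fully-signed `channel_announcement` along with our latest `channel_update`.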
5215         fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
5216                 let per_peer_state = self.per_peer_state.read().unwrap();
5217                 let peer_state_mutex = per_peer_state.get(counterparty_node_id)
5218                         .ok_or_else(|| {
5219                                 debug_assert!(false);
5220                                 MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
5221                         })?;
5222                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5223                 let peer_state = &mut *peer_state_lock;
5224                 match peer_state.channel_by_id.entry(msg.channel_id) {
5225                         hash_map::Entry::Occupied(mut chan) => {
5226                                 if !chan.get().is_usable() {
5227                                         return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
5228                                 }
5229
5230                                 peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
5231                                         msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
5232                                                 &self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(),
5233                                                 msg, &self.default_configuration
5234                                         ), chan),
5235                                         // Note that announcement_signatures fails if the channel cannot be announced,
5236                                         // so get_channel_update_for_broadcast will never fail by the time we get here.
5237                                         update_msg: Some(self.get_channel_update_for_broadcast(chan.get()).unwrap()),
5238                                 });
5239                         },
5240                         hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
5241                 }
5242                 Ok(())
5243         }
5244
	/// Handles a `channel_update` message for one of our channels. Returns `DoPersist` if
	/// anything changed, otherwise either `SkipPersist` or an `Err`.
5246         fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
5247                 let (chan_counterparty_node_id, chan_id) = match self.short_to_chan_info.read().unwrap().get(&msg.contents.short_channel_id) {
5248                         Some((cp_id, chan_id)) => (cp_id.clone(), chan_id.clone()),
5249                         None => {
5250                                 // It's not a local channel
5251                                 return Ok(NotifyOption::SkipPersist)
5252                         }
5253                 };
5254                 let per_peer_state = self.per_peer_state.read().unwrap();
5255                 let peer_state_mutex_opt = per_peer_state.get(&chan_counterparty_node_id);
5256                 if peer_state_mutex_opt.is_none() {
5257                         return Ok(NotifyOption::SkipPersist)
5258                 }
5259                 let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
5260                 let peer_state = &mut *peer_state_lock;
5261                 match peer_state.channel_by_id.entry(chan_id) {
5262                         hash_map::Entry::Occupied(mut chan) => {
5263                                 if chan.get().get_counterparty_node_id() != *counterparty_node_id {
5264                                         if chan.get().should_announce() {
5265                                                 // If the announcement is about a channel of ours which is public, some
5266                                                 // other peer may simply be forwarding all its gossip to us. Don't provide
5267                                                 // a scary-looking error message and return Ok instead.
5268                                                 return Ok(NotifyOption::SkipPersist);
5269                                         }
5270                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
5271                                 }
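				// Per BOLT 7, bit 0 of the `channel_update` flags encodes the direction: 0 means
				// the update was generated by the lexicographically-lesser `node_id` (node_one).
				// We only apply updates describing the counterparty's side of the channel; an
				// update matching our own side carries nothing new for us.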
5272                                 let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
5273                                 let msg_from_node_one = msg.contents.flags & 1 == 0;
5274                                 if were_node_one == msg_from_node_one {
5275                                         return Ok(NotifyOption::SkipPersist);
5276                                 } else {
5277                                         log_debug!(self.logger, "Received channel_update for channel {}.", log_bytes!(chan_id));
5278                                         try_chan_entry!(self, chan.get_mut().channel_update(&msg), chan);
5279                                 }
5280                         },
5281                         hash_map::Entry::Vacant(_) => return Ok(NotifyOption::SkipPersist)
5282                 }
5283                 Ok(NotifyOption::DoPersist)
5284         }
5285
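	/// Handles a counterparty `channel_reestablish` message on reconnection, re-queueing any
	/// messages the counterparty is missing (`channel_ready`, `revoke_and_ack`,
	/// `commitment_signed`, a `shutdown`, and/or an updated `channel_update`) and resuming any
	/// HTLC forwards that were interrupted by the disconnect.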
5286         fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
5287                 let htlc_forwards;
5288                 let need_lnd_workaround = {
5289                         let per_peer_state = self.per_peer_state.read().unwrap();
5290
5291                         let peer_state_mutex = per_peer_state.get(counterparty_node_id)
5292                                 .ok_or_else(|| {
5293                                         debug_assert!(false);
5294                                         MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
5295                                 })?;
5296                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5297                         let peer_state = &mut *peer_state_lock;
5298                         match peer_state.channel_by_id.entry(msg.channel_id) {
5299                                 hash_map::Entry::Occupied(mut chan) => {
5300                                         // Currently, we expect all holding cell update_adds to be dropped on peer
5301                                         // disconnect, so Channel's reestablish will never hand us any holding cell
5302                                         // freed HTLCs to fail backwards. If in the future we no longer drop pending
5303                                         // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
5304                                         let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
5305                                                 msg, &self.logger, &self.node_signer, self.genesis_hash,
5306                                                 &self.default_configuration, &*self.best_block.read().unwrap()), chan);
5307                                         let mut channel_update = None;
5308                                         if let Some(msg) = responses.shutdown_msg {
5309                                                 peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
5310                                                         node_id: counterparty_node_id.clone(),
5311                                                         msg,
5312                                                 });
5313                                         } else if chan.get().is_usable() {
5314                                         // If the channel is in a usable state (i.e. the channel is not being shut
5315                                                 // down), send a unicast channel_update to our counterparty to make sure
5316                                                 // they have the latest channel parameters.
5317                                                 if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
5318                                                         channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
5319                                                                 node_id: chan.get().get_counterparty_node_id(),
5320                                                                 msg,
5321                                                         });
5322                                                 }
5323                                         }
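                                        // lnd has a bug (lightningnetwork/lnd#4006) where it may send
                                        // channel_ready before channel_reestablish on reconnect. If `Channel`
                                        // stashed such an early channel_ready for this workaround, take it and
                                        // process it below, after the per-peer locks have been released.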
5324                                         let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
5325                                         htlc_forwards = self.handle_channel_resumption(
5326                                                 &mut peer_state.pending_msg_events, chan.get_mut(), responses.raa, responses.commitment_update, responses.order,
5327                                                 Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
5328                                         if let Some(upd) = channel_update {
5329                                                 peer_state.pending_msg_events.push(upd);
5330                                         }
5331                                         need_lnd_workaround
5332                                 },
5333                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
5334                         }
5335                 };
5336
5337                 if let Some(forwards) = htlc_forwards {
5338                         self.forward_htlcs(&mut [forwards][..]);
5339                 }
5340
5341                 if let Some(channel_ready_msg) = need_lnd_workaround {
5342                         self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
5343                 }
5344                 Ok(())
5345         }
5346
5347         /// Process pending events from the [`chain::Watch`], returning whether any events were processed.
5348         fn process_pending_monitor_events(&self) -> bool {
5349                 debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock
5350
5351                 let mut failed_channels = Vec::new();
5352                 let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
5353                 let has_pending_monitor_events = !pending_monitor_events.is_empty();
5354                 for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) {
5355                         for monitor_event in monitor_events.drain(..) {
5356                                 match monitor_event {
5357                                         MonitorEvent::HTLCEvent(htlc_update) => {
5358                                                 if let Some(preimage) = htlc_update.payment_preimage {
5359                                                         log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
5360                                                         self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
5361                                                 } else {
5362                                                         log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
5363                                                         let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
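                                                        // Fail back with PERM|8 (BOLT 4 `permanent_channel_failure`), as the
                                                        // channel this HTLC was forwarded over is gone for good.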
5364                                                         let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
5365                                                         self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver);
5366                                                 }
5367                                         },
5368                                         MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
5369                                         MonitorEvent::UpdateFailed(funding_outpoint) => {
5370                                                 let counterparty_node_id_opt = match counterparty_node_id {
5371                                                         Some(cp_id) => Some(cp_id),
5372                                                         None => {
5373                                                                 // TODO: Once we can rely on the counterparty_node_id from the
5374                                                                 // monitor event, this and the id_to_peer map should be removed.
5375                                                                 let id_to_peer = self.id_to_peer.lock().unwrap();
5376                                                                 id_to_peer.get(&funding_outpoint.to_channel_id()).cloned()
5377                                                         }
5378                                                 };
5379                                                 if let Some(counterparty_node_id) = counterparty_node_id_opt {
5380                                                         let per_peer_state = self.per_peer_state.read().unwrap();
5381                                                         if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
5382                                                                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5383                                                                 let peer_state = &mut *peer_state_lock;
5384                                                                 let pending_msg_events = &mut peer_state.pending_msg_events;
5385                                                                 if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
5386                                                                         let mut chan = remove_channel!(self, chan_entry);
5387                                                                         failed_channels.push(chan.force_shutdown(false));
5388                                                                         if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
5389                                                                                 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
5390                                                                                         msg: update
5391                                                                                 });
5392                                                                         }
5393                                                                         let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
5394                                                                                 ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
5395                                                                         } else {
5396                                                                                 ClosureReason::CommitmentTxConfirmed
5397                                                                         };
5398                                                                         self.issue_channel_close_events(&chan, reason);
5399                                                                         pending_msg_events.push(events::MessageSendEvent::HandleError {
5400                                                                                 node_id: chan.get_counterparty_node_id(),
5401                                                                                 action: msgs::ErrorAction::SendErrorMessage {
5402                                                                                         msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
5403                                                                                 },
5404                                                                         });
5405                                                                 }
5406                                                         }
5407                                                 }
5408                                         },
5409                                         MonitorEvent::Completed { funding_txo, monitor_update_id } => {
5410                                                 self.channel_monitor_updated(&funding_txo, monitor_update_id, counterparty_node_id.as_ref());
5411                                         },
5412                                 }
5413                         }
5414                 }
5415
5416                 for failure in failed_channels.drain(..) {
5417                         self.finish_force_close_channel(failure);
5418                 }
5419
5420                 has_pending_monitor_events
5421         }
5422
5423         /// In chanmon_consistency_target, we'd like to be able to restore monitor updating without
5424         /// handling all pending events (in particular, without handling `PendingHTLCsForwardable`
5425         /// events). Thus, we expose monitor update events as a separate process method here.
5426         #[cfg(fuzzing)]
5427         pub fn process_monitor_events(&self) {
5428                 PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
5429                         if self.process_pending_monitor_events() {
5430                                 NotifyOption::DoPersist
5431                         } else {
5432                                 NotifyOption::SkipPersist
5433                         }
5434                 });
5435         }
5436
5437         /// Check the holding cell in each channel and free any pending HTLCs in them if possible.
5438         /// Returns whether there were any updates, such as pending HTLCs being freed or a monitor
5439         /// update being applied.
5440         fn check_free_holding_cells(&self) -> bool {
5441                 let mut has_monitor_update = false;
5442                 let mut failed_htlcs = Vec::new();
5443                 let mut handle_errors = Vec::new();
5444
5445                 // Walk our list of channels and find any that need to update. Note that when we do find an
5446                 // update, if it includes actions that must be taken afterwards, we have to drop the
5447                 // per-peer state lock as well as the top level per_peer_state lock. Thus, we loop until we
5448                 // manage to go through all our peers without finding a single channel to update.
5449                 'peer_loop: loop {
5450                         let per_peer_state = self.per_peer_state.read().unwrap();
5451                         for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
5452                                 'chan_loop: loop {
5453                                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5454                                         let peer_state: &mut PeerState<_> = &mut *peer_state_lock;
5455                                         for (channel_id, chan) in peer_state.channel_by_id.iter_mut() {
5456                                                 let counterparty_node_id = chan.get_counterparty_node_id();
5457                                                 let funding_txo = chan.get_funding_txo();
5458                                                 let (monitor_opt, holding_cell_failed_htlcs) =
5459                                                         chan.maybe_free_holding_cell_htlcs(&self.logger);
5460                                                 if !holding_cell_failed_htlcs.is_empty() {
5461                                                         failed_htlcs.push((holding_cell_failed_htlcs, *channel_id, counterparty_node_id));
5462                                                 }
5463                                                 if let Some(monitor_update) = monitor_opt {
5464                                                         has_monitor_update = true;
5465
5466                                                         let update_res = self.chain_monitor.update_channel(
5467                                                                 funding_txo.expect("channel is live"), monitor_update);
5468                                                         let update_id = monitor_update.update_id;
5469                                                         let channel_id: [u8; 32] = *channel_id;
5470                                                         let res = handle_new_monitor_update!(self, update_res, update_id,
5471                                                                 peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
5472                                                                 peer_state.channel_by_id.remove(&channel_id));
5473                                                         if res.is_err() {
5474                                                                 handle_errors.push((counterparty_node_id, res));
5475                                                         }
5476                                                         continue 'peer_loop;
5477                                                 }
5478                                         }
5479                                         break 'chan_loop;
5480                                 }
5481                         }
5482                         break 'peer_loop;
5483                 }
5484
5485                 let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
5486                 for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
5487                         self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
5488                 }
5489
5490                 for (counterparty_node_id, err) in handle_errors.drain(..) {
5491                         let _ = handle_error!(self, err, counterparty_node_id);
5492                 }
5493
5494                 has_update
5495         }
5496
5497         /// Check whether any channels have finished removing all pending updates after a shutdown
5498         /// exchange and can now send a closing_signed.
5499         /// Returns whether any closing_signed messages were generated.
5500         fn maybe_generate_initial_closing_signed(&self) -> bool {
5501                 let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
5502                 let mut has_update = false;
5503                 {
5504                         let per_peer_state = self.per_peer_state.read().unwrap();
5505
5506                         for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
5507                                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5508                                 let peer_state = &mut *peer_state_lock;
5509                                 let pending_msg_events = &mut peer_state.pending_msg_events;
5510                                 peer_state.channel_by_id.retain(|channel_id, chan| {
5511                                         match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
5512                                                 Ok((msg_opt, tx_opt)) => {
5513                                                         if let Some(msg) = msg_opt {
5514                                                                 has_update = true;
5515                                                                 pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
5516                                                                         node_id: chan.get_counterparty_node_id(), msg,
5517                                                                 });
5518                                                         }
5519                                                         if let Some(tx) = tx_opt {
5520                                                                 // We're done with this channel. We got a closing_signed and sent back
5521                                                                 // a closing_signed with a closing transaction to broadcast.
5522                                                                 if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
5523                                                                         pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
5524                                                                                 msg: update
5525                                                                         });
5526                                                                 }
5527
5528                                                                 self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
5529
5530                                                                 log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
5531                                                                 self.tx_broadcaster.broadcast_transaction(&tx);
5532                                                                 update_maps_on_chan_removal!(self, chan);
5533                                                                 false
5534                                                         } else { true }
5535                                                 },
5536                                                 Err(e) => {
5537                                                         has_update = true;
5538                                                         let (close_channel, res) = convert_chan_err!(self, e, chan, channel_id);
5539                                                         handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
5540                                                         !close_channel
5541                                                 }
5542                                         }
5543                                 });
5544                         }
5545                 }
5546
5547                 for (counterparty_node_id, err) in handle_errors.drain(..) {
5548                         let _ = handle_error!(self, err, counterparty_node_id);
5549                 }
5550
5551                 has_update
5552         }
5553
5554         /// Handle a list of channel failures during a block_connected or block_disconnected call,
5555         /// pushing the channel monitor update (if any) to the background events queue and removing the
5556         /// Channel object.
5557         fn handle_init_event_channel_failures(&self, mut failed_channels: Vec<ShutdownResult>) {
5558                 for mut failure in failed_channels.drain(..) {
5559                         // Either a commitment transaction has been confirmed on-chain or
5560                         // Channel::block_disconnected detected that the funding transaction has been
5561                         // reorganized out of the main chain.
5562                         // We cannot broadcast our latest local state via monitor update (as
5563                         // Channel::force_shutdown tries to make us do) as we may still be in initialization,
5564                         // so we track the update internally and handle it when the user next calls
5565                         // timer_tick_occurred, guaranteeing we're running normally.
5566                         if let Some((funding_txo, update)) = failure.0.take() {
5567                                 assert_eq!(update.updates.len(), 1);
5568                                 if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
5569                                         assert!(should_broadcast);
5570                                 } else { unreachable!(); }
5571                                 self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
5572                         }
5573                         self.finish_force_close_channel(failure);
5574                 }
5575         }
5576
5577         fn set_payment_hash_secret_map(&self, payment_hash: PaymentHash, payment_preimage: Option<PaymentPreimage>, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, APIError> {
5578                 assert!(invoice_expiry_delta_secs <= 60*60*24*365); // Sadly bitcoin timestamps are u32s, so panic before 2106
5579
5580                 if min_value_msat.is_some() && min_value_msat.unwrap() > MAX_VALUE_MSAT {
5581                         return Err(APIError::APIMisuseError { err: format!("min_value_msat of {} greater than total 21 million bitcoin supply", min_value_msat.unwrap()) });
5582                 }
5583
5584                 let payment_secret = PaymentSecret(self.entropy_source.get_secure_random_bytes());
5585
5586                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5587                 let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
5588                 match payment_secrets.entry(payment_hash) {
5589                         hash_map::Entry::Vacant(e) => {
5590                                 e.insert(PendingInboundPayment {
5591                                         payment_secret, min_value_msat, payment_preimage,
5592                                         user_payment_id: 0, // For compatibility with version 0.0.103 and earlier
5593                                         // We assume that highest_seen_timestamp is pretty close to the current time -
5594                                         // it's updated to the greatest block-header timestamp we've seen whenever we
5595                                         // receive a new block. It should never be more than two hours in the future.
5596                                         // Thus, we add two hours here as a buffer to ensure we absolutely
5597                                         // never fail a payment too early.
5598                                         // Note that we assume that received blocks have reasonably up-to-date
5599                                         // timestamps.
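                                        // For example, with invoice_expiry_delta_secs = 3600 and a highest seen
                                        // timestamp T, the payment remains claimable until block time exceeds
                                        // T + 3600 + 7200.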
5600                                         expiry_time: self.highest_seen_timestamp.load(Ordering::Acquire) as u64 + invoice_expiry_delta_secs as u64 + 7200,
5601                                 });
5602                         },
5603                         hash_map::Entry::Occupied(_) => return Err(APIError::APIMisuseError { err: "Duplicate payment hash".to_owned() }),
5604                 }
5605                 Ok(payment_secret)
5606         }
5607
5608         /// Gets a payment secret and payment hash for use in an invoice given to a third party wishing
5609         /// to pay us.
5610         ///
5611         /// This differs from [`create_inbound_payment_for_hash`] only in that it generates the
5612         /// [`PaymentHash`] and [`PaymentPreimage`] for you.
5613         ///
5614         /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentClaimable`] event,
5615         /// whose [`PaymentClaimable::purpose`] will be [`PaymentPurpose::InvoicePayment`] with
5616         /// its [`PaymentPurpose::InvoicePayment::payment_preimage`] field filled in. That should then be
5617         /// passed directly to [`claim_funds`].
5618         ///
5619         /// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
5620         ///
5621         /// Note that a malicious eavesdropper can intuit whether an inbound payment was created by
5622         /// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime.
5623         ///
5624         /// # Note
5625         ///
5626         /// If you register an inbound payment with this method, then serialize the `ChannelManager`, then
5627         /// deserialize it with a node running 0.0.103 and earlier, the payment will fail to be received.
5628         ///
5629         /// Errors if `min_value_msat` is greater than total bitcoin supply.
5630         ///
5631         /// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable
5632         /// on versions of LDK prior to 0.0.114.
5633         ///
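        /// # Example
        ///
        /// A minimal sketch, assuming `channel_manager` is an initialized [`ChannelManager`]; invoice
        /// construction and event handling are elided:
        ///
        /// ```ignore
        /// // Register a payment of at least 10_000 msat, valid for one hour, using the default
        /// // `min_final_cltv_expiry_delta`.
        /// let (payment_hash, payment_secret) = channel_manager
        ///     .create_inbound_payment(Some(10_000), 3600, None)
        ///     .expect("min_value_msat must not exceed the total bitcoin supply");
        /// // Embed `payment_hash` and `payment_secret` in an invoice for the payer; the preimage
        /// // comes back in `Event::PaymentClaimable` and should be passed to `claim_funds`.
        /// ```
        ///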
5634         /// [`claim_funds`]: Self::claim_funds
5635         /// [`PaymentClaimable`]: events::Event::PaymentClaimable
5636         /// [`PaymentClaimable::purpose`]: events::Event::PaymentClaimable::purpose
5637         /// [`PaymentPurpose::InvoicePayment`]: events::PaymentPurpose::InvoicePayment
5638         /// [`PaymentPurpose::InvoicePayment::payment_preimage`]: events::PaymentPurpose::InvoicePayment::payment_preimage
5639         /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
5640         pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32,
5641                 min_final_cltv_expiry_delta: Option<u16>) -> Result<(PaymentHash, PaymentSecret), ()> {
5642                 inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs,
5643                         &self.entropy_source, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
5644                         min_final_cltv_expiry_delta)
5645         }
5646
5647         /// Legacy version of [`create_inbound_payment`]. Use this method if you wish to share
5648         /// serialized state with LDK node(s) running 0.0.103 and earlier.
5649         ///
5650         /// May panic if `invoice_expiry_delta_secs` is greater than one year.
5651         ///
5652         /// # Note
5653         /// This method is deprecated and will be removed soon.
5654         ///
5655         /// [`create_inbound_payment`]: Self::create_inbound_payment
5656         #[deprecated]
5657         pub fn create_inbound_payment_legacy(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), APIError> {
5658                 let payment_preimage = PaymentPreimage(self.entropy_source.get_secure_random_bytes());
5659                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
5660                 let payment_secret = self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs)?;
5661                 Ok((payment_hash, payment_secret))
5662         }
5663
5664         /// Gets a [`PaymentSecret`] for a given [`PaymentHash`], for which the payment preimage is
5665         /// stored external to LDK.
5666         ///
5667         /// A [`PaymentClaimable`] event will only be generated if the [`PaymentSecret`] matches a
5668         /// payment secret fetched via this method or [`create_inbound_payment`], and if the payment
5669         /// amount is at least the `min_value_msat` provided here, when one is provided.
5670         ///
5671         /// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) should be globally unique, though
5672         /// note that LDK will not stop you from registering duplicate payment hashes for inbound
5673         /// payments.
5674         ///
5675         /// `min_value_msat` should be set if the invoice being generated contains a value. Any payment
5676         /// received for the returned [`PaymentHash`] will be required to be at least `min_value_msat`
5677         /// before a [`PaymentClaimable`] event will be generated, ensuring that we do not provide the
5678         /// sender "proof-of-payment" unless they have paid the required amount.
5679         ///
5680         /// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for
5681         /// in excess of the current time. This should roughly match the expiry time set in the invoice.
5682         /// After this many seconds, we will remove the inbound payment, resulting in any attempts to
5683         /// pay the invoice failing. The BOLT spec suggests 3,600 secs as a default validity time for
5684         /// invoices when no timeout is set.
5685         ///
5686         /// Note that we use block header time to time-out pending inbound payments (with some margin
5687         /// to compensate for the inaccuracy of block header timestamps). Thus, in practice we will
5688         /// accept a payment and generate a [`PaymentClaimable`] event for some time after the expiry.
5689         /// If you need exact expiry semantics, you should enforce them upon receipt of
5690         /// [`PaymentClaimable`].
5691         ///
5692         /// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry_delta`
5693         /// set to at least [`MIN_FINAL_CLTV_EXPIRY_DELTA`].
5694         ///
5695         /// Note that a malicious eavesdropper can intuit whether an inbound payment was created by
5696         /// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime.
5697         ///
5698         /// # Note
5699         ///
5700         /// If you register an inbound payment with this method, then serialize the `ChannelManager`, then
5701         /// deserialize it with a node running 0.0.103 and earlier, the payment will fail to be received.
5702         ///
5703         /// Errors if `min_value_msat` is greater than total bitcoin supply.
5704         ///
5705         /// If `min_final_cltv_expiry_delta` is set to some value, then the payment will not be receivable
5706         /// on versions of LDK prior to 0.0.114.
5707         ///
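        /// # Example
        ///
        /// A minimal sketch, assuming `channel_manager` is an initialized [`ChannelManager`] and
        /// `preimage_bytes` is a 32-byte preimage held outside LDK (both names are illustrative):
        ///
        /// ```ignore
        /// let payment_hash = PaymentHash(Sha256::hash(&preimage_bytes).into_inner());
        /// let payment_secret = channel_manager
        ///     .create_inbound_payment_for_hash(payment_hash, Some(10_000), 3600, None)
        ///     .expect("registering the inbound payment failed");
        /// // Hand `payment_hash` and `payment_secret` to the payer; claim with the externally
        /// // stored preimage once `Event::PaymentClaimable` fires.
        /// ```
        ///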
5708         /// [`create_inbound_payment`]: Self::create_inbound_payment
5709         /// [`PaymentClaimable`]: events::Event::PaymentClaimable
5710         pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>,
5711                 invoice_expiry_delta_secs: u32, min_final_cltv_expiry: Option<u16>) -> Result<PaymentSecret, ()> {
5712                 inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash,
5713                         invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64,
5714                         min_final_cltv_expiry)
5715         }
5716
5717         /// Legacy version of [`create_inbound_payment_for_hash`]. Use this method if you wish to share
5718         /// serialized state with LDK node(s) running 0.0.103 and earlier.
5719         ///
5720         /// May panic if `invoice_expiry_delta_secs` is greater than one year.
5721         ///
5722         /// # Note
5723         /// This method is deprecated and will be removed soon.
5724         ///
5725         /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
5726         #[deprecated]
5727         pub fn create_inbound_payment_for_hash_legacy(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, APIError> {
5728                 self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs)
5729         }
5730
5731         /// Gets an LDK-generated payment preimage from a payment hash and payment secret that were
5732         /// previously returned from [`create_inbound_payment`].
5733         ///
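        /// # Example
        ///
        /// A sketch, assuming `payment_hash` and `payment_secret` came from an earlier
        /// [`create_inbound_payment`] call on this same node:
        ///
        /// ```ignore
        /// // Recover the preimage, e.g. to hand out as proof-of-payment after claiming.
        /// let preimage = channel_manager.get_payment_preimage(payment_hash, payment_secret)
        ///     .expect("the hash/secret pair was minted by create_inbound_payment");
        /// ```
        ///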
5734         /// [`create_inbound_payment`]: Self::create_inbound_payment
5735         pub fn get_payment_preimage(&self, payment_hash: PaymentHash, payment_secret: PaymentSecret) -> Result<PaymentPreimage, APIError> {
5736                 inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
5737         }
5738
5739         /// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
5740         /// are used when constructing the phantom invoice's route hints.
5741         ///
5742         /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
5743         pub fn get_phantom_scid(&self) -> u64 {
5744                 let best_block_height = self.best_block.read().unwrap().height();
5745                 let short_to_chan_info = self.short_to_chan_info.read().unwrap();
5746                 loop {
5747                         let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
5748                         // Ensure the generated scid doesn't conflict with a real channel.
5749                         match short_to_chan_info.get(&scid_candidate) {
5750                                 Some(_) => continue,
5751                                 None => return scid_candidate
5752                         }
5753                 }
5754         }
5755
5756         /// Gets route hints for use in receiving [phantom node payments].
5757         ///
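        /// # Example
        ///
        /// A sketch of gathering hints from every real node backing the phantom; the
        /// invoice-building step is illustrative rather than a specific API:
        ///
        /// ```ignore
        /// let hints: Vec<PhantomRouteHints> = real_nodes.iter()
        ///     .map(|cm| cm.get_phantom_route_hints())
        ///     .collect();
        /// // Feed `hints` into whatever constructs the phantom invoice (e.g. a helper from
        /// // the `lightning-invoice` crate) so payers can route to the phantom node.
        /// ```
        ///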
5758         /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
5759         pub fn get_phantom_route_hints(&self) -> PhantomRouteHints {
5760                 PhantomRouteHints {
5761                         channels: self.list_usable_channels(),
5762                         phantom_scid: self.get_phantom_scid(),
5763                         real_node_pubkey: self.get_our_node_id(),
5764                 }
5765         }
5766
5767         /// Gets a fake short channel id for use in receiving intercepted payments. These fake scids are
5768         /// used when constructing the route hints for HTLCs intended to be intercepted. See
5769         /// [`ChannelManager::forward_intercepted_htlc`].
5770         ///
5771         /// Note that this method is not guaranteed to return unique values; you may need to call it a few
5772         /// times to get a unique scid.
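        ///
        /// # Example
        ///
        /// A sketch of the interception flow, assuming `accept_intercept_htlcs` was set in the
        /// node's `UserConfig` and that the names below come from the matching
        /// `Event::HTLCIntercepted`:
        ///
        /// ```ignore
        /// // Advertise the fake scid in an invoice route hint.
        /// let intercept_scid = channel_manager.get_intercept_scid();
        /// // ...later, when Event::HTLCIntercepted fires for that scid, pick the real
        /// // channel to forward over:
        /// channel_manager.forward_intercepted_htlc(intercept_id, &next_hop_channel_id,
        ///     next_node_pubkey, expected_outbound_amount_msat)?;
        /// ```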
5773         pub fn get_intercept_scid(&self) -> u64 {
5774                 let best_block_height = self.best_block.read().unwrap().height();
5775                 let short_to_chan_info = self.short_to_chan_info.read().unwrap();
5776                 loop {
5777                         let scid_candidate = fake_scid::Namespace::Intercept.get_fake_scid(best_block_height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.entropy_source);
5778                         // Ensure the generated scid doesn't conflict with a real channel.
5779                         if short_to_chan_info.contains_key(&scid_candidate) { continue }
5780                         return scid_candidate
5781                 }
5782         }
5783
5784         /// Gets inflight HTLC information by processing pending outbound payments that are in
5785         /// our channels. May be used during pathfinding to account for in-use channel liquidity.
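        ///
        /// # Example
        ///
        /// A sketch of threading the in-flight set into routing; `router`, `payer_pubkey`,
        /// `route_params` and `first_hops` are assumed to be in scope, and the exact
        /// [`Router::find_route`] signature varies by version, so treat the call shape as
        /// illustrative:
        ///
        /// ```ignore
        /// let inflight_htlcs = channel_manager.compute_inflight_htlcs();
        /// let route = router.find_route(
        ///     &payer_pubkey, &route_params, Some(&first_hops), &inflight_htlcs,
        /// )?;
        /// ```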
5786         pub fn compute_inflight_htlcs(&self) -> InFlightHtlcs {
5787                 let mut inflight_htlcs = InFlightHtlcs::new();
5788
5789                 let per_peer_state = self.per_peer_state.read().unwrap();
5790                 for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
5791                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5792                         let peer_state = &mut *peer_state_lock;
5793                         for chan in peer_state.channel_by_id.values() {
5794                                 for (htlc_source, _) in chan.inflight_htlc_sources() {
5795                                         if let HTLCSource::OutboundRoute { path, .. } = htlc_source {
5796                                                 inflight_htlcs.process_path(path, self.get_our_node_id());
5797                                         }
5798                                 }
5799                         }
5800                 }
5801
5802                 inflight_htlcs
5803         }
5804
5805         #[cfg(any(test, fuzzing, feature = "_test_utils"))]
5806         pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
5807                 let events = core::cell::RefCell::new(Vec::new());
5808                 let event_handler = |event: events::Event| events.borrow_mut().push(event);
5809                 self.process_pending_events(&event_handler);
5810                 events.into_inner()
5811         }
5812
5813         #[cfg(feature = "_test_utils")]
5814         pub fn push_pending_event(&self, event: events::Event) {
5815                 let mut events = self.pending_events.lock().unwrap();
5816                 events.push(event);
5817         }
5818
5819         #[cfg(test)]
5820         pub fn pop_pending_event(&self) -> Option<events::Event> {
5821                 let mut events = self.pending_events.lock().unwrap();
5822                 if events.is_empty() { None } else { Some(events.remove(0)) }
5823         }
5824
5825         #[cfg(test)]
5826         pub fn has_pending_payments(&self) -> bool {
5827                 self.pending_outbound_payments.has_pending_payments()
5828         }
5829
5830         #[cfg(test)]
5831         pub fn clear_pending_payments(&self) {
5832                 self.pending_outbound_payments.clear_pending_payments()
5833         }
5834
5835         /// Processes any events asynchronously in the order they were generated since the last call
5836         /// using the given event handler.
5837         ///
5838         /// See the trait-level documentation of [`EventsProvider`] for requirements.
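        ///
        /// # Example
        ///
        /// A sketch, assuming an async context and a user-provided async `handle_event` fn:
        ///
        /// ```ignore
        /// channel_manager.process_pending_events_async(|event| async move {
        ///     handle_event(event).await;
        /// }).await;
        /// ```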
5839         pub async fn process_pending_events_async<Future: core::future::Future, H: Fn(Event) -> Future>(
5840                 &self, handler: H
5841         ) {
5842                 let mut ev;
5843                 process_events_body!(self, ev, { handler(ev).await });
5844         }
5845 }
5846
5847 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, L>
5848 where
5849         M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
5850         T::Target: BroadcasterInterface,
5851         ES::Target: EntropySource,
5852         NS::Target: NodeSigner,
5853         SP::Target: SignerProvider,
5854         F::Target: FeeEstimator,
5855         R::Target: Router,
5856         L::Target: Logger,
5857 {
5858         /// Returns `MessageSendEvent`s strictly ordered per-peer, in the order they were generated.
5859         /// The returned array will contain `MessageSendEvent`s for different peers if
5860         /// `MessageSendEvent`s to more than one peer exist, but `MessageSendEvent`s to the same peer
5861         /// are always placed next to each other.
5862         ///
5863         /// Note that while `MessageSendEvent`s are strictly ordered per-peer, the peer order for
5864         /// the chunks of `MessageSendEvent`s for different peers is random. I.e. if the array contains
5865         /// `MessageSendEvent`s for both `node_a` and `node_b`, the `MessageSendEvent`s for `node_a`
5866         /// will randomly be placed first or last in the returned array.
5867         ///
5868         /// Note that even though `BroadcastChannelAnnouncement` and `BroadcastChannelUpdate`
5869         /// `MessageSendEvent`s are intended to be broadcast to all peers, they will be placed among
5870         /// the `MessageSendEvent`s to the specific peer they were generated under.
5871         fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
5872                 let events = RefCell::new(Vec::new());
5873                 PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
5874                         let mut result = NotifyOption::SkipPersist;
5875
5876                         // TODO: This behavior should be documented. It's unintuitive that we query
5877                         // ChannelMonitors when clearing other events.
5878                         if self.process_pending_monitor_events() {
5879                                 result = NotifyOption::DoPersist;
5880                         }
5881
5882                         if self.check_free_holding_cells() {
5883                                 result = NotifyOption::DoPersist;
5884                         }
5885                         if self.maybe_generate_initial_closing_signed() {
5886                                 result = NotifyOption::DoPersist;
5887                         }
5888
5889                         let mut pending_events = Vec::new();
5890                         let per_peer_state = self.per_peer_state.read().unwrap();
5891                         for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
5892                                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
5893                                 let peer_state = &mut *peer_state_lock;
5894                                 if !peer_state.pending_msg_events.is_empty() {
5895                                         pending_events.append(&mut peer_state.pending_msg_events);
5896                                 }
5897                         }
5898
5899                         if !pending_events.is_empty() {
5900                                 events.replace(pending_events);
5901                         }
5902
5903                         result
5904                 });
5905                 events.into_inner()
5906         }
5907 }
5908
5909 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> EventsProvider for ChannelManager<M, T, ES, NS, SP, F, R, L>
5910 where
5911         M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
5912         T::Target: BroadcasterInterface,
5913         ES::Target: EntropySource,
5914         NS::Target: NodeSigner,
5915         SP::Target: SignerProvider,
5916         F::Target: FeeEstimator,
5917         R::Target: Router,
5918         L::Target: Logger,
5919 {
5920         /// Processes events that must be periodically handled.
5921         ///
5922         /// An [`EventHandler`] may safely call back to the provider in order to handle an event.
5923         /// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
5924         fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
5925                 let mut ev;
5926                 process_events_body!(self, ev, handler.handle_event(ev));
5927         }
5928 }
5929
5930 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> chain::Listen for ChannelManager<M, T, ES, NS, SP, F, R, L>
5931 where
5932         M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
5933         T::Target: BroadcasterInterface,
5934         ES::Target: EntropySource,
5935         NS::Target: NodeSigner,
5936         SP::Target: SignerProvider,
5937         F::Target: FeeEstimator,
5938         R::Target: Router,
5939         L::Target: Logger,
5940 {
5941         fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
5942                 {
5943                         let best_block = self.best_block.read().unwrap();
5944                         assert_eq!(best_block.block_hash(), header.prev_blockhash,
5945                                 "Blocks must be connected in chain-order - the connected header must build on the last connected header");
5946                         assert_eq!(best_block.height(), height - 1,
5947                                 "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
5948                 }
5949
5950                 self.transactions_confirmed(header, txdata, height);
5951                 self.best_block_updated(header, height);
5952         }
5953
5954         fn block_disconnected(&self, header: &BlockHeader, height: u32) {
5955                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5956                 let new_height = height - 1;
5957                 {
5958                         let mut best_block = self.best_block.write().unwrap();
5959                         assert_eq!(best_block.block_hash(), header.block_hash(),
5960                                 "Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
5961                         assert_eq!(best_block.height(), height,
5962                                 "Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
5963                         *best_block = BestBlock::new(header.prev_blockhash, new_height)
5964                 }
5965
5966                 self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
5967         }
5968 }
5969
5970 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> chain::Confirm for ChannelManager<M, T, ES, NS, SP, F, R, L>
5971 where
5972         M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
5973         T::Target: BroadcasterInterface,
5974         ES::Target: EntropySource,
5975         NS::Target: NodeSigner,
5976         SP::Target: SignerProvider,
5977         F::Target: FeeEstimator,
5978         R::Target: Router,
5979         L::Target: Logger,
5980 {
5981         fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
5982                 // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
5983                 // during initialization prior to the chain_monitor being fully configured in some cases.
5984                 // See the docs for `ChannelManagerReadArgs` for more.
5985
5986                 let block_hash = header.block_hash();
5987                 log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
5988
5989                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5990                 self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger)
5991                         .map(|(a, b)| (a, Vec::new(), b)));
5992
5993                 let last_best_block_height = self.best_block.read().unwrap().height();
5994                 if height < last_best_block_height {
5995                         let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
5996                         self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
5997                 }
5998         }
5999
6000         fn best_block_updated(&self, header: &BlockHeader, height: u32) {
6001                 // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
6002                 // during initialization prior to the chain_monitor being fully configured in some cases.
6003                 // See the docs for `ChannelManagerReadArgs` for more.
6004
6005                 let block_hash = header.block_hash();
6006                 log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
6007
6008                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6009
6010                 *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
6011
6012                 self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), &self.node_signer, &self.default_configuration, &self.logger));
6013
6014                 macro_rules! max_time {
6015                         ($timestamp: expr) => {
6016                                 loop {
6017                                         // Update $timestamp to be the max of its current value and the block
6018                                         // timestamp. This should keep us close to the current time without relying on
6019                                         // having an explicit local time source.
6020                                         // Just in case we end up in a race, we loop until we either successfully
6021                                         // update $timestamp or decide we don't need to.
6022                                         let old_serial = $timestamp.load(Ordering::Acquire);
6023                                         if old_serial >= header.time as usize { break; }
6024                                         if $timestamp.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
6025                                                 break;
6026                                         }
6027                                 }
6028                         }
6029                 }
6030                 max_time!(self.highest_seen_timestamp);
6031                 let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
6032                 payment_secrets.retain(|_, inbound_payment| {
6033                         inbound_payment.expiry_time > header.time as u64
6034                 });
6035         }
6036
6037         fn get_relevant_txids(&self) -> Vec<(Txid, Option<BlockHash>)> {
6038                 let mut res = Vec::with_capacity(self.short_to_chan_info.read().unwrap().len());
6039                 for (_cp_id, peer_state_mutex) in self.per_peer_state.read().unwrap().iter() {
6040                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6041                         let peer_state = &mut *peer_state_lock;
6042                         for chan in peer_state.channel_by_id.values() {
6043                                 if let (Some(funding_txo), Some(block_hash)) = (chan.get_funding_txo(), chan.get_funding_tx_confirmed_in()) {
6044                                         res.push((funding_txo.txid, Some(block_hash)));
6045                                 }
6046                         }
6047                 }
6048                 res
6049         }
6050
6051         fn transaction_unconfirmed(&self, txid: &Txid) {
6052                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6053                 self.do_chain_event(None, |channel| {
6054                         if let Some(funding_txo) = channel.get_funding_txo() {
6055                                 if funding_txo.txid == *txid {
6056                                         channel.funding_transaction_unconfirmed(&self.logger).map(|()| (None, Vec::new(), None))
6057                                 } else { Ok((None, Vec::new(), None)) }
6058                         } else { Ok((None, Vec::new(), None)) }
6059                 });
6060         }
6061 }
6062
6063 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> ChannelManager<M, T, ES, NS, SP, F, R, L>
6064 where
6065         M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
6066         T::Target: BroadcasterInterface,
6067         ES::Target: EntropySource,
6068         NS::Target: NodeSigner,
6069         SP::Target: SignerProvider,
6070         F::Target: FeeEstimator,
6071         R::Target: Router,
6072         L::Target: Logger,
6073 {
6074         /// Calls a function which handles an on-chain event (blocks dis/connected, transactions
6075         /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
6076         /// the function.
6077         fn do_chain_event<FN: Fn(&mut Channel<<SP::Target as SignerProvider>::Signer>) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
6078                         (&self, height_opt: Option<u32>, f: FN) {
6079                 // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
6080                 // during initialization prior to the chain_monitor being fully configured in some cases.
6081                 // See the docs for `ChannelManagerReadArgs` for more.
6082
6083                 let mut failed_channels = Vec::new();
6084                 let mut timed_out_htlcs = Vec::new();
6085                 {
6086                         let per_peer_state = self.per_peer_state.read().unwrap();
6087                         for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
6088                                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6089                                 let peer_state = &mut *peer_state_lock;
6090                                 let pending_msg_events = &mut peer_state.pending_msg_events;
6091                                 peer_state.channel_by_id.retain(|_, channel| {
6092                                         let res = f(channel);
6093                                         if let Ok((channel_ready_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
6094                                                 for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
6095                                                         let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
6096                                                         timed_out_htlcs.push((source, payment_hash, HTLCFailReason::reason(failure_code, data),
6097                                                                 HTLCDestination::NextHopChannel { node_id: Some(channel.get_counterparty_node_id()), channel_id: channel.channel_id() }));
6098                                                 }
6099                                                 if let Some(channel_ready) = channel_ready_opt {
6100                                                         send_channel_ready!(self, pending_msg_events, channel, channel_ready);
6101                                                         if channel.is_usable() {
6102                                                                 log_trace!(self.logger, "Sending channel_ready with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
6103                                                                 if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
6104                                                                         pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
6105                                                                                 node_id: channel.get_counterparty_node_id(),
6106                                                                                 msg,
6107                                                                         });
6108                                                                 }
6109                                                         } else {
6110                                                                 log_trace!(self.logger, "Sending channel_ready WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
6111                                                         }
6112                                                 }
6113
6114                                                 {
6115                                                         let mut pending_events = self.pending_events.lock().unwrap();
6116                                                         emit_channel_ready_event!(pending_events, channel);
6117                                                 }
6118
6119                                                 if let Some(announcement_sigs) = announcement_sigs {
6120                                                         log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
6121                                                         pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
6122                                                                 node_id: channel.get_counterparty_node_id(),
6123                                                                 msg: announcement_sigs,
6124                                                         });
6125                                                         if let Some(height) = height_opt {
6126                                                                 if let Some(announcement) = channel.get_signed_channel_announcement(&self.node_signer, self.genesis_hash, height, &self.default_configuration) {
6127                                                                         pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
6128                                                                                 msg: announcement,
6129                                                                                 // Note that announcement_signatures fails if the channel cannot be announced,
6130                                                                                 // so get_channel_update_for_broadcast will never fail by the time we get here.
6131                                                                                 update_msg: Some(self.get_channel_update_for_broadcast(channel).unwrap()),
6132                                                                         });
6133                                                                 }
6134                                                         }
6135                                                 }
6136                                                 if channel.is_our_channel_ready() {
6137                                                         if let Some(real_scid) = channel.get_short_channel_id() {
6138                                                                 // If we sent a 0conf channel_ready, and now have an SCID, we add it
6139                                                                 // to the short_to_chan_info map here. Note that we check whether we
6140                                                                 // can relay using the real SCID at relay-time (i.e.
6141                                                                 // enforce option_scid_alias then), and if the funding tx is ever
6142                                                                 // un-confirmed we force-close the channel, ensuring short_to_chan_info
6143                                                                 // is always consistent.
6144                                                                 let mut short_to_chan_info = self.short_to_chan_info.write().unwrap();
6145                                                                 let scid_insert = short_to_chan_info.insert(real_scid, (channel.get_counterparty_node_id(), channel.channel_id()));
6146                                                                 assert!(scid_insert.is_none() || scid_insert.unwrap() == (channel.get_counterparty_node_id(), channel.channel_id()),
6147                                                                         "SCIDs should never collide - ensure you weren't behind by a full {} blocks when creating channels",
6148                                                                         fake_scid::MAX_SCID_BLOCKS_FROM_NOW);
6149                                                         }
6150                                                 }
6151                                         } else if let Err(reason) = res {
6152                                                 update_maps_on_chan_removal!(self, channel);
6153                                                 // It looks like our counterparty went on-chain or the funding transaction was
6154                                                 // reorged out of the main chain. Close the channel.
6155                                                 failed_channels.push(channel.force_shutdown(true));
6156                                                 if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
6157                                                         pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
6158                                                                 msg: update
6159                                                         });
6160                                                 }
6161                                                 let reason_message = format!("{}", reason);
6162                                                 self.issue_channel_close_events(channel, reason);
6163                                                 pending_msg_events.push(events::MessageSendEvent::HandleError {
6164                                                         node_id: channel.get_counterparty_node_id(),
6165                                                         action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
6166                                                                 channel_id: channel.channel_id(),
6167                                                                 data: reason_message,
6168                                                         } },
6169                                                 });
6170                                                 return false;
6171                                         }
6172                                         true
6173                                 });
6174                         }
6175                 }
6176
6177                 if let Some(height) = height_opt {
6178                         self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
6179                                 htlcs.retain(|htlc| {
6180                                         // If the current height is within HTLC_FAIL_BACK_BUFFER blocks of the HTLC's CLTV
6181                                         // expiry (covering the blocks we expect to need to get our commitment transaction
6182                                         // confirmed plus the blocks we generally allow for a commitment update), give up
6183                                         // on the HTLC and fail it backwards.
6184                                         if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
6185                                                 let mut htlc_msat_height_data = htlc.value.to_be_bytes().to_vec();
6186                                                 htlc_msat_height_data.extend_from_slice(&height.to_be_bytes());
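                                                         // Per BOLT 4, the failure data for 0x4000|15
                                                         // (incorrect_or_unknown_payment_details) is the HTLC
                                                         // amount (u64, big-endian) followed by the current
                                                         // height (u32, big-endian), as assembled above.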
6187
6188                                                 timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(),
6189                                                         HTLCFailReason::reason(0x4000 | 15, htlc_msat_height_data),
6190                                                         HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
6191                                                 false
6192                                         } else { true }
6193                                 });
6194                                 !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
6195                         });
6196
6197                         let mut intercepted_htlcs = self.pending_intercepted_htlcs.lock().unwrap();
6198                         intercepted_htlcs.retain(|_, htlc| {
6199                                 if height >= htlc.forward_info.outgoing_cltv_value - HTLC_FAIL_BACK_BUFFER {
6200                                         let prev_hop_data = HTLCSource::PreviousHopData(HTLCPreviousHopData {
6201                                                 short_channel_id: htlc.prev_short_channel_id,
6202                                                 htlc_id: htlc.prev_htlc_id,
6203                                                 incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret,
6204                                                 phantom_shared_secret: None,
6205                                                 outpoint: htlc.prev_funding_outpoint,
6206                                         });
6207
6208                                         let requested_forward_scid /* intercept scid */ = match htlc.forward_info.routing {
6209                                                 PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
6210                                                 _ => unreachable!(),
6211                                         };
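                                             // 0x2000 | 2 is `temporary_node_failure` (NODE|2) per BOLT 4.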
6212                                         timed_out_htlcs.push((prev_hop_data, htlc.forward_info.payment_hash,
6213                                                         HTLCFailReason::from_failure_code(0x2000 | 2),
6214                                                         HTLCDestination::InvalidForward { requested_forward_scid }));
6215                                         log_trace!(self.logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid);
6216                                         false
6217                                 } else { true }
6218                         });
6219                 }
6220
6221                 self.handle_init_event_channel_failures(failed_channels);
6222
6223                 for (source, payment_hash, reason, destination) in timed_out_htlcs.drain(..) {
6224                         self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, destination);
6225                 }
6226         }
6227
6228         /// Gets a [`Future`] that completes when this [`ChannelManager`] needs to be persisted.
6229         ///
6230         /// Note that callbacks registered on the [`Future`] MUST NOT call back into this
6231         /// [`ChannelManager`] and should instead register actions to be taken later.
6232         ///
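             /// A minimal usage sketch (the `persister` and its `persist_manager` call here are
             /// assumptions for illustration, not part of this method's API):
             ///
             /// ```ignore
             /// let future = channel_manager.get_persistable_update_future();
             /// // ...once the future completes (e.g. via a callback registered on it)...
             /// persister.persist_manager(&channel_manager)?;
             /// ```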
6233         pub fn get_persistable_update_future(&self) -> Future {
6234                 self.persistence_notifier.get_future()
6235         }
6236
6237         #[cfg(any(test, feature = "_test_utils"))]
6238         pub fn get_persistence_condvar_value(&self) -> bool {
6239                 self.persistence_notifier.notify_pending()
6240         }
6241
6242         /// Gets the latest best block which was connected either via the [`chain::Listen`] or
6243         /// [`chain::Confirm`] interfaces.
6244         pub fn current_best_block(&self) -> BestBlock {
6245                 self.best_block.read().unwrap().clone()
6246         }
6247
6248         /// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
6249         /// [`ChannelManager`].
6250         pub fn node_features(&self) -> NodeFeatures {
6251                 provided_node_features(&self.default_configuration)
6252         }
6253
6254         /// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
6255         /// [`ChannelManager`].
6256         ///
6257         /// Note that the invoice feature flags can vary depending on whether the invoice is a "phantom invoice"
6258         /// or not. Thus, this method is not public.
6259         #[cfg(any(feature = "_test_utils", test))]
6260         pub fn invoice_features(&self) -> InvoiceFeatures {
6261                 provided_invoice_features(&self.default_configuration)
6262         }
6263
6264         /// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by
6265         /// [`ChannelManager`].
6266         pub fn channel_features(&self) -> ChannelFeatures {
6267                 provided_channel_features(&self.default_configuration)
6268         }
6269
6270         /// Fetches the set of [`ChannelTypeFeatures`] flags which are provided by or required by
6271         /// [`ChannelManager`].
6272         pub fn channel_type_features(&self) -> ChannelTypeFeatures {
6273                 provided_channel_type_features(&self.default_configuration)
6274         }
6275
6276         /// Fetches the set of [`InitFeatures`] flags which are provided by or required by
6277         /// [`ChannelManager`].
6278         pub fn init_features(&self) -> InitFeatures {
6279                 provided_init_features(&self.default_configuration)
6280         }
6281 }
6282
6283 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
6284         ChannelMessageHandler for ChannelManager<M, T, ES, NS, SP, F, R, L>
6285 where
6286         M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
6287         T::Target: BroadcasterInterface,
6288         ES::Target: EntropySource,
6289         NS::Target: NodeSigner,
6290         SP::Target: SignerProvider,
6291         F::Target: FeeEstimator,
6292         R::Target: Router,
6293         L::Target: Logger,
6294 {
6295         fn handle_open_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::OpenChannel) {
6296                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6297                 let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, msg), *counterparty_node_id);
6298         }
6299
6300         fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, msg: &msgs::AcceptChannel) {
6301                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6302                 let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, msg), *counterparty_node_id);
6303         }
6304
6305         fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
6306                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6307                 let _ = handle_error!(self, self.internal_funding_created(counterparty_node_id, msg), *counterparty_node_id);
6308         }
6309
6310         fn handle_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) {
6311                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6312                 let _ = handle_error!(self, self.internal_funding_signed(counterparty_node_id, msg), *counterparty_node_id);
6313         }
6314
6315         fn handle_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) {
6316                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6317                 let _ = handle_error!(self, self.internal_channel_ready(counterparty_node_id, msg), *counterparty_node_id);
6318         }
6319
6320         fn handle_shutdown(&self, counterparty_node_id: &PublicKey, msg: &msgs::Shutdown) {
6321                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6322                 let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, msg), *counterparty_node_id);
6323         }
6324
6325         fn handle_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
6326                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6327                 let _ = handle_error!(self, self.internal_closing_signed(counterparty_node_id, msg), *counterparty_node_id);
6328         }
6329
6330         fn handle_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
6331                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6332                 let _ = handle_error!(self, self.internal_update_add_htlc(counterparty_node_id, msg), *counterparty_node_id);
6333         }
6334
6335         fn handle_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
6336                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6337                 let _ = handle_error!(self, self.internal_update_fulfill_htlc(counterparty_node_id, msg), *counterparty_node_id);
6338         }
6339
6340         fn handle_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
6341                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6342                 let _ = handle_error!(self, self.internal_update_fail_htlc(counterparty_node_id, msg), *counterparty_node_id);
6343         }
6344
6345         fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
6346                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6347                 let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(counterparty_node_id, msg), *counterparty_node_id);
6348         }
6349
6350         fn handle_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
6351                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6352                 let _ = handle_error!(self, self.internal_commitment_signed(counterparty_node_id, msg), *counterparty_node_id);
6353         }
6354
6355         fn handle_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) {
6356                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6357                 let _ = handle_error!(self, self.internal_revoke_and_ack(counterparty_node_id, msg), *counterparty_node_id);
6358         }
6359
6360         fn handle_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) {
6361                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6362                 let _ = handle_error!(self, self.internal_update_fee(counterparty_node_id, msg), *counterparty_node_id);
6363         }
6364
6365         fn handle_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
6366                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6367                 let _ = handle_error!(self, self.internal_announcement_signatures(counterparty_node_id, msg), *counterparty_node_id);
6368         }
6369
6370         fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
6371                 PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
6372                         if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
6373                                 persist
6374                         } else {
6375                                 NotifyOption::SkipPersist
6376                         }
6377                 });
6378         }
6379
6380         fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
6381                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6382                 let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
6383         }
6384
6385         fn peer_disconnected(&self, counterparty_node_id: &PublicKey) {
6386                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6387                 let mut failed_channels = Vec::new();
6388                 let mut per_peer_state = self.per_peer_state.write().unwrap();
6389                 let remove_peer = {
6390                         log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates.",
6391                                 log_pubkey!(counterparty_node_id));
6392                         if let Some(peer_state_mutex) = per_peer_state.get(counterparty_node_id) {
6393                                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6394                                 let peer_state = &mut *peer_state_lock;
6395                                 let pending_msg_events = &mut peer_state.pending_msg_events;
6396                                 peer_state.channel_by_id.retain(|_, chan| {
6397                                         chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
6398                                         if chan.is_shutdown() {
6399                                                 update_maps_on_chan_removal!(self, chan);
6400                                                 self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
6401                                                 return false;
6402                                         }
6403                                         true
6404                                 });
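                                     // Peer-directed messages will be regenerated on reconnect via
                                     // channel_reestablish, so we drop them here; gossip broadcasts are
                                     // not tied to this peer's connection and are kept.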
6405                                 pending_msg_events.retain(|msg| {
6406                                         match msg {
6407                                                 &events::MessageSendEvent::SendAcceptChannel { .. } => false,
6408                                                 &events::MessageSendEvent::SendOpenChannel { .. } => false,
6409                                                 &events::MessageSendEvent::SendFundingCreated { .. } => false,
6410                                                 &events::MessageSendEvent::SendFundingSigned { .. } => false,
6411                                                 &events::MessageSendEvent::SendChannelReady { .. } => false,
6412                                                 &events::MessageSendEvent::SendAnnouncementSignatures { .. } => false,
6413                                                 &events::MessageSendEvent::UpdateHTLCs { .. } => false,
6414                                                 &events::MessageSendEvent::SendRevokeAndACK { .. } => false,
6415                                                 &events::MessageSendEvent::SendClosingSigned { .. } => false,
6416                                                 &events::MessageSendEvent::SendShutdown { .. } => false,
6417                                                 &events::MessageSendEvent::SendChannelReestablish { .. } => false,
6418                                                 &events::MessageSendEvent::SendChannelAnnouncement { .. } => false,
6419                                                 &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
6420                                                 &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
6421                                                 &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
6422                                                 &events::MessageSendEvent::SendChannelUpdate { .. } => false,
6423                                                 &events::MessageSendEvent::HandleError { .. } => false,
6424                                                 &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
6425                                                 &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
6426                                                 &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
6427                                                 &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
6428                                         }
6429                                 });
6430                                 debug_assert!(peer_state.is_connected, "A disconnected peer cannot disconnect");
6431                                 peer_state.is_connected = false;
6432                                 peer_state.ok_to_remove(true)
6433                         } else { debug_assert!(false, "Unconnected peer disconnected"); true }
6434                 };
6435                 if remove_peer {
6436                         per_peer_state.remove(counterparty_node_id);
6437                 }
6438                 mem::drop(per_peer_state);
6439
6440                 for failure in failed_channels.drain(..) {
6441                         self.finish_force_close_channel(failure);
6442                 }
6443         }
6444
6445         fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init, inbound: bool) -> Result<(), ()> {
6446                 if !init_msg.features.supports_static_remote_key() {
6447                         log_debug!(self.logger, "Peer {} does not support static remote key, disconnecting", log_pubkey!(counterparty_node_id));
6448                         return Err(());
6449                 }
6450
6451                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6452
6453                 // If we already have too many peers connected which don't have funded channels, disconnect the
6454                 // newly-connecting peer immediately (as long as it, too, has no funded channels). If we have a bunch of
6455                 // unfunded channels taking up space in memory for disconnected peers, we still let new
6456                 // peers connect, but we'll reject new channels from them.
6457                 let connected_peers_without_funded_channels = self.peers_without_funded_channels(|node| node.is_connected);
6458                 let inbound_peer_limited = inbound && connected_peers_without_funded_channels >= MAX_NO_CHANNEL_PEERS;
6459
6460                 {
6461                         let mut peer_state_lock = self.per_peer_state.write().unwrap();
6462                         match peer_state_lock.entry(counterparty_node_id.clone()) {
6463                                 hash_map::Entry::Vacant(e) => {
6464                                         if inbound_peer_limited {
6465                                                 return Err(());
6466                                         }
6467                                         e.insert(Mutex::new(PeerState {
6468                                                 channel_by_id: HashMap::new(),
6469                                                 latest_features: init_msg.features.clone(),
6470                                                 pending_msg_events: Vec::new(),
6471                                                 monitor_update_blocked_actions: BTreeMap::new(),
6472                                                 is_connected: true,
6473                                         }));
6474                                 },
6475                                 hash_map::Entry::Occupied(e) => {
6476                                         let mut peer_state = e.get().lock().unwrap();
6477                                         peer_state.latest_features = init_msg.features.clone();
6478
6479                                         let best_block_height = self.best_block.read().unwrap().height();
6480                                         if inbound_peer_limited &&
6481                                                 Self::unfunded_channel_count(&*peer_state, best_block_height) ==
6482                                                 peer_state.channel_by_id.len()
6483                                         {
6484                                                 return Err(());
6485                                         }
6486
6487                                         debug_assert!(!peer_state.is_connected, "A peer shouldn't be connected twice");
6488                                         peer_state.is_connected = true;
6489                                 },
6490                         }
6491                 }
6492
6493                 log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
6494
6495                 let per_peer_state = self.per_peer_state.read().unwrap();
6496                 for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
6497                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
6498                         let peer_state = &mut *peer_state_lock;
6499                         let pending_msg_events = &mut peer_state.pending_msg_events;
6500                         peer_state.channel_by_id.retain(|_, chan| {
6501                                 let retain = if chan.get_counterparty_node_id() == *counterparty_node_id {
6502                                         if !chan.have_received_message() {
6503                                                 // If we created this (outbound) channel while we were disconnected from the
6504                                                 // peer we probably failed to send the open_channel message, which is now
6505                                                 // lost. We can't have had anything pending related to this channel, so we just
6506                                                 // drop it.
6507                                                 false
6508                                         } else {
6509                                                 pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
6510                                                         node_id: chan.get_counterparty_node_id(),
6511                                                         msg: chan.get_channel_reestablish(&self.logger),
6512                                                 });
6513                                                 true
6514                                         }
6515                                 } else { true };
6516                                 if retain && chan.get_counterparty_node_id() != *counterparty_node_id {
6517                                         if let Some(msg) = chan.get_signed_channel_announcement(&self.node_signer, self.genesis_hash.clone(), self.best_block.read().unwrap().height(), &self.default_configuration) {
6518                                                 if let Ok(update_msg) = self.get_channel_update_for_broadcast(chan) {
6519                                                         pending_msg_events.push(events::MessageSendEvent::SendChannelAnnouncement {
6520                                                                 node_id: *counterparty_node_id,
6521                                                                 msg, update_msg,
6522                                                         });
6523                                                 }
6524                                         }
6525                                 }
6526                                 retain
6527                         });
6528                 }
6529                 //TODO: Also re-broadcast announcement_signatures
6530                 Ok(())
6531         }
6532
6533         fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
6534                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
6535
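                     // Per BOLT 1, an error with an all-zeros channel_id applies to all of the
                     // sending peer's channels with us, so we force-close each of them below.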
6536                 if msg.channel_id == [0; 32] {
6537                         let channel_ids: Vec<[u8; 32]> = {
6538                                 let per_peer_state = self.per_peer_state.read().unwrap();
6539                                 let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
6540                                 if peer_state_mutex_opt.is_none() { return; }
6541                                 let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
6542                                 let peer_state = &mut *peer_state_lock;
6543                                 peer_state.channel_by_id.keys().cloned().collect()
6544                         };
6545                         for channel_id in channel_ids {
6546                                 // Untrusted message from the peer; we throw away the error if the id points to a non-existent channel
6547                                 let _ = self.force_close_channel_with_peer(&channel_id, counterparty_node_id, Some(&msg.data), true);
6548                         }
6549                 } else {
6550                         {
6551                                 // First check if we can advance the channel type and try again.
6552                                 let per_peer_state = self.per_peer_state.read().unwrap();
6553                                 let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
6554                                 if peer_state_mutex_opt.is_none() { return; }
6555                                 let mut peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap();
6556                                 let peer_state = &mut *peer_state_lock;
6557                                 if let Some(chan) = peer_state.channel_by_id.get_mut(&msg.channel_id) {
6558                                         if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
6559                                                 peer_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
6560                                                         node_id: *counterparty_node_id,
6561                                                         msg,
6562                                                 });
6563                                                 return;
6564                                         }
6565                                 }
6566                         }
6567
6568                         // Untrusted message from the peer; we throw away the error if the id points to a non-existent channel
6569                         let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data), true);
6570                 }
6571         }
6572
6573         fn provided_node_features(&self) -> NodeFeatures {
6574                 provided_node_features(&self.default_configuration)
6575         }
6576
6577         fn provided_init_features(&self, _their_node_id: &PublicKey) -> InitFeatures {
6578                 provided_init_features(&self.default_configuration)
6579         }
6580 }
6581
6582 /// Fetches the set of [`NodeFeatures`] flags which are provided by or required by
6583 /// [`ChannelManager`].
6584 pub(crate) fn provided_node_features(config: &UserConfig) -> NodeFeatures {
6585         provided_init_features(config).to_context()
6586 }
6587
6588 /// Fetches the set of [`InvoiceFeatures`] flags which are provided by or required by
6589 /// [`ChannelManager`].
6590 ///
6591 /// Note that the invoice feature flags can vary depending on whether the invoice is a "phantom invoice"
6592 /// or not. Thus, this method is not public.
6593 #[cfg(any(feature = "_test_utils", test))]
6594 pub(crate) fn provided_invoice_features(config: &UserConfig) -> InvoiceFeatures {
6595         provided_init_features(config).to_context()
6596 }
6597
6598 /// Fetches the set of [`ChannelFeatures`] flags which are provided by or required by
6599 /// [`ChannelManager`].
6600 pub(crate) fn provided_channel_features(config: &UserConfig) -> ChannelFeatures {
6601         provided_init_features(config).to_context()
6602 }
6603
6604 /// Fetches the set of [`ChannelTypeFeatures`] flags which are provided by or required by
6605 /// [`ChannelManager`].
6606 pub(crate) fn provided_channel_type_features(config: &UserConfig) -> ChannelTypeFeatures {
6607         ChannelTypeFeatures::from_init(&provided_init_features(config))
6608 }
6609
6610 /// Fetches the set of [`InitFeatures`] flags which are provided by or required by
6611 /// [`ChannelManager`].
6612 pub fn provided_init_features(_config: &UserConfig) -> InitFeatures {
6613         // Note that if new features are added here which other peers may (eventually) require, we
6614         // should also add the corresponding (optional) bit to the [`ChannelMessageHandler`] impl for
6615         // [`ErroringMessageHandler`].
6616         let mut features = InitFeatures::empty();
6617         features.set_data_loss_protect_optional();
6618         features.set_upfront_shutdown_script_optional();
6619         features.set_variable_length_onion_required();
6620         features.set_static_remote_key_required();
6621         features.set_payment_secret_required();
6622         features.set_basic_mpp_optional();
6623         features.set_wumbo_optional();
6624         features.set_shutdown_any_segwit_optional();
6625         features.set_channel_type_optional();
6626         features.set_scid_privacy_optional();
6627         features.set_zero_conf_optional();
6628         #[cfg(anchors)]
6629         { // Attributes are not allowed on if expressions on our current MSRV of 1.41.
6630                 if _config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx {
6631                         features.set_anchors_zero_fee_htlc_tx_optional();
6632                 }
6633         }
6634         features
6635 }
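     // A minimal sketch of inspecting the resulting flags (illustrative; assumes a
     // `config: UserConfig` in scope):
     //
     //     let features = provided_init_features(&config);
     //     assert!(features.supports_static_remote_key());
     //     assert!(features.supports_basic_mpp());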
6636
6637 const SERIALIZATION_VERSION: u8 = 1;
6638 const MIN_SERIALIZATION_VERSION: u8 = 1;
6639
6640 impl_writeable_tlv_based!(CounterpartyForwardingInfo, {
6641         (2, fee_base_msat, required),
6642         (4, fee_proportional_millionths, required),
6643         (6, cltv_expiry_delta, required),
6644 });
6645
6646 impl_writeable_tlv_based!(ChannelCounterparty, {
6647         (2, node_id, required),
6648         (4, features, required),
6649         (6, unspendable_punishment_reserve, required),
6650         (8, forwarding_info, option),
6651         (9, outbound_htlc_minimum_msat, option),
6652         (11, outbound_htlc_maximum_msat, option),
6653 });
6654
6655 impl Writeable for ChannelDetails {
6656         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6657                 // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
6658                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
6659                 let user_channel_id_low = self.user_channel_id as u64;
6660                 let user_channel_id_high_opt = Some((self.user_channel_id >> 64) as u64);
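                     // For example (illustrative values): user_channel_id = (1u128 << 64) + 2 splits
                     // into user_channel_id_low = 2 and user_channel_id_high_opt = Some(1); the read
                     // side below recombines them.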
6661                 write_tlv_fields!(writer, {
6662                         (1, self.inbound_scid_alias, option),
6663                         (2, self.channel_id, required),
6664                         (3, self.channel_type, option),
6665                         (4, self.counterparty, required),
6666                         (5, self.outbound_scid_alias, option),
6667                         (6, self.funding_txo, option),
6668                         (7, self.config, option),
6669                         (8, self.short_channel_id, option),
6670                         (9, self.confirmations, option),
6671                         (10, self.channel_value_satoshis, required),
6672                         (12, self.unspendable_punishment_reserve, option),
6673                         (14, user_channel_id_low, required),
6674                         (16, self.balance_msat, required),
6675                         (18, self.outbound_capacity_msat, required),
6676                         // Note that the `(default_value, ...)` expression below is only evaluated on the read side;
6677                         // when writing, the field is serialized like any required field. See `Readable for ChannelDetails`.
6678                         (19, self.next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
6679                         (20, self.inbound_capacity_msat, required),
6680                         (22, self.confirmations_required, option),
6681                         (24, self.force_close_spend_delay, option),
6682                         (26, self.is_outbound, required),
6683                         (28, self.is_channel_ready, required),
6684                         (30, self.is_usable, required),
6685                         (32, self.is_public, required),
6686                         (33, self.inbound_htlc_minimum_msat, option),
6687                         (35, self.inbound_htlc_maximum_msat, option),
6688                         (37, user_channel_id_high_opt, option),
6689                         (39, self.feerate_sat_per_1000_weight, option),
6690                 });
6691                 Ok(())
6692         }
6693 }
6694
6695 impl Readable for ChannelDetails {
6696         fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
6697                 _init_and_read_tlv_fields!(reader, {
6698                         (1, inbound_scid_alias, option),
6699                         (2, channel_id, required),
6700                         (3, channel_type, option),
6701                         (4, counterparty, required),
6702                         (5, outbound_scid_alias, option),
6703                         (6, funding_txo, option),
6704                         (7, config, option),
6705                         (8, short_channel_id, option),
6706                         (9, confirmations, option),
6707                         (10, channel_value_satoshis, required),
6708                         (12, unspendable_punishment_reserve, option),
6709                         (14, user_channel_id_low, required),
6710                         (16, balance_msat, required),
6711                         (18, outbound_capacity_msat, required),
6712                         // Note that by the time we get past the required read above, outbound_capacity_msat will be
6713                         // filled in, so we can safely unwrap it here.
6714                         (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap() as u64)),
6715                         (20, inbound_capacity_msat, required),
6716                         (22, confirmations_required, option),
6717                         (24, force_close_spend_delay, option),
6718                         (26, is_outbound, required),
6719                         (28, is_channel_ready, required),
6720                         (30, is_usable, required),
6721                         (32, is_public, required),
6722                         (33, inbound_htlc_minimum_msat, option),
6723                         (35, inbound_htlc_maximum_msat, option),
6724                         (37, user_channel_id_high_opt, option),
6725                         (39, feerate_sat_per_1000_weight, option),
6726                 });
6727
6728                 // `user_channel_id` used to be a single u64 value. In order to remain backwards compatible with
6729                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values.
6730                 let user_channel_id_low: u64 = user_channel_id_low.0.unwrap();
6731                 let user_channel_id = user_channel_id_low as u128 +
6732                         ((user_channel_id_high_opt.unwrap_or(0 as u64) as u128) << 64);
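                     // e.g. user_channel_id_low = 2 with user_channel_id_high_opt = Some(1)
                     // recombines to (1u128 << 64) + 2, inverting the split done when writing.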
6733
6734                 Ok(Self {
6735                         inbound_scid_alias,
6736                         channel_id: channel_id.0.unwrap(),
6737                         channel_type,
6738                         counterparty: counterparty.0.unwrap(),
6739                         outbound_scid_alias,
6740                         funding_txo,
6741                         config,
6742                         short_channel_id,
6743                         channel_value_satoshis: channel_value_satoshis.0.unwrap(),
6744                         unspendable_punishment_reserve,
6745                         user_channel_id,
6746                         balance_msat: balance_msat.0.unwrap(),
6747                         outbound_capacity_msat: outbound_capacity_msat.0.unwrap(),
6748                         next_outbound_htlc_limit_msat: next_outbound_htlc_limit_msat.0.unwrap(),
6749                         inbound_capacity_msat: inbound_capacity_msat.0.unwrap(),
6750                         confirmations_required,
6751                         confirmations,
6752                         force_close_spend_delay,
6753                         is_outbound: is_outbound.0.unwrap(),
6754                         is_channel_ready: is_channel_ready.0.unwrap(),
6755                         is_usable: is_usable.0.unwrap(),
6756                         is_public: is_public.0.unwrap(),
6757                         inbound_htlc_minimum_msat,
6758                         inbound_htlc_maximum_msat,
6759                         feerate_sat_per_1000_weight,
6760                 })
6761         }
6762 }
6763
6764 impl_writeable_tlv_based!(PhantomRouteHints, {
6765         (2, channels, vec_type),
6766         (4, phantom_scid, required),
6767         (6, real_node_pubkey, required),
6768 });
6769
6770 impl_writeable_tlv_based_enum!(PendingHTLCRouting,
6771         (0, Forward) => {
6772                 (0, onion_packet, required),
6773                 (2, short_channel_id, required),
6774         },
6775         (1, Receive) => {
6776                 (0, payment_data, required),
6777                 (1, phantom_shared_secret, option),
6778                 (2, incoming_cltv_expiry, required),
6779         },
6780         (2, ReceiveKeysend) => {
6781                 (0, payment_preimage, required),
6782                 (2, incoming_cltv_expiry, required),
6783         },
6784 ;);
6785
6786 impl_writeable_tlv_based!(PendingHTLCInfo, {
6787         (0, routing, required),
6788         (2, incoming_shared_secret, required),
6789         (4, payment_hash, required),
6790         (6, outgoing_amt_msat, required),
6791         (8, outgoing_cltv_value, required),
6792         (9, incoming_amt_msat, option),
6793 });
6794
6795
6796 impl Writeable for HTLCFailureMsg {
6797         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6798                 match self {
6799                         HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => {
6800                                 0u8.write(writer)?;
6801                                 channel_id.write(writer)?;
6802                                 htlc_id.write(writer)?;
6803                                 reason.write(writer)?;
6804                         },
6805                         HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
6806                                 channel_id, htlc_id, sha256_of_onion, failure_code
6807                         }) => {
6808                                 1u8.write(writer)?;
6809                                 channel_id.write(writer)?;
6810                                 htlc_id.write(writer)?;
6811                                 sha256_of_onion.write(writer)?;
6812                                 failure_code.write(writer)?;
6813                         },
6814                 }
6815                 Ok(())
6816         }
6817 }
6818
6819 impl Readable for HTLCFailureMsg {
6820         fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
6821                 let id: u8 = Readable::read(reader)?;
6822                 match id {
6823                         0 => {
6824                                 Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
6825                                         channel_id: Readable::read(reader)?,
6826                                         htlc_id: Readable::read(reader)?,
6827                                         reason: Readable::read(reader)?,
6828                                 }))
6829                         },
6830                         1 => {
6831                                 Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
6832                                         channel_id: Readable::read(reader)?,
6833                                         htlc_id: Readable::read(reader)?,
6834                                         sha256_of_onion: Readable::read(reader)?,
6835                                         failure_code: Readable::read(reader)?,
6836                                 }))
6837                         },
6838                         // In versions prior to 0.0.101, HTLCFailureMsg objects were written with type 0 or 1 but
6839                         // weren't length-prefixed and thus didn't support reading the TLV stream suffix of the network
6840                         // messages contained in the variants.
6841                         // In version 0.0.101, support for reading the length-prefixed variants (types 2 and 3 below) was added, and
6842                         // we should migrate to writing these variants when UpdateFailHTLC or
6843                         // UpdateFailMalformedHTLC get TLV fields.
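                             // Concretely, the length-prefixed encoding reads as (a sketch; sizes in
                             // bytes): [type: 1][length: BigSize][serialized message: `length` bytes],
                             // whereas the legacy type-0/1 encodings carry no length prefix.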
6844                         2 => {
6845                                 let length: BigSize = Readable::read(reader)?;
6846                                 let mut s = FixedLengthReader::new(reader, length.0);
6847                                 let res = Readable::read(&mut s)?;
6848                                 s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
6849                                 Ok(HTLCFailureMsg::Relay(res))
6850                         },
6851                         3 => {
6852                                 let length: BigSize = Readable::read(reader)?;
6853                                 let mut s = FixedLengthReader::new(reader, length.0);
6854                                 let res = Readable::read(&mut s)?;
6855                                 s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
6856                                 Ok(HTLCFailureMsg::Malformed(res))
6857                         },
6858                         _ => Err(DecodeError::UnknownRequiredFeature),
6859                 }
6860         }
6861 }
6862
6863 impl_writeable_tlv_based_enum!(PendingHTLCStatus, ;
6864         (0, Forward),
6865         (1, Fail),
6866 );
6867
6868 impl_writeable_tlv_based!(HTLCPreviousHopData, {
6869         (0, short_channel_id, required),
6870         (1, phantom_shared_secret, option),
6871         (2, outpoint, required),
6872         (4, htlc_id, required),
6873         (6, incoming_packet_shared_secret, required)
6874 });
6875
6876 impl Writeable for ClaimableHTLC {
6877         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6878                 let (payment_data, keysend_preimage) = match &self.onion_payload {
6879                         OnionPayload::Invoice { _legacy_hop_data } => (_legacy_hop_data.as_ref(), None),
6880                         OnionPayload::Spontaneous(preimage) => (None, Some(preimage)),
6881                 };
6882                 write_tlv_fields!(writer, {
6883                         (0, self.prev_hop, required),
6884                         (1, self.total_msat, required),
6885                         (2, self.value, required),
6886                         (3, self.sender_intended_value, required),
6887                         (4, payment_data, option),
6888                         (5, self.total_value_received, option),
6889                         (6, self.cltv_expiry, required),
6890                         (8, keysend_preimage, option),
6891                 });
6892                 Ok(())
6893         }
6894 }
6895
6896 impl Readable for ClaimableHTLC {
6897         fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
6898                 let mut prev_hop = crate::util::ser::RequiredWrapper(None);
6899                 let mut value = 0;
6900                 let mut sender_intended_value = None;
6901                 let mut payment_data: Option<msgs::FinalOnionHopData> = None;
6902                 let mut cltv_expiry = 0;
6903                 let mut total_value_received = None;
6904                 let mut total_msat = None;
6905                 let mut keysend_preimage: Option<PaymentPreimage> = None;
6906                 read_tlv_fields!(reader, {
6907                         (0, prev_hop, required),
6908                         (1, total_msat, option),
6909                         (2, value, required),
6910                         (3, sender_intended_value, option),
6911                         (4, payment_data, option),
6912                         (5, total_value_received, option),
6913                         (6, cltv_expiry, required),
6914                         (8, keysend_preimage, option)
6915                 });
6916                 let onion_payload = match keysend_preimage {
6917                         Some(p) => {
6918                                 if payment_data.is_some() {
6919                                         return Err(DecodeError::InvalidValue)
6920                                 }
6921                                 if total_msat.is_none() {
6922                                         total_msat = Some(value);
6923                                 }
6924                                 OnionPayload::Spontaneous(p)
6925                         },
6926                         None => {
6927                                 if total_msat.is_none() {
6928                                         if payment_data.is_none() {
6929                                                 return Err(DecodeError::InvalidValue)
6930                                         }
6931                                         total_msat = Some(payment_data.as_ref().unwrap().total_msat);
6932                                 }
6933                                 OnionPayload::Invoice { _legacy_hop_data: payment_data }
6934                         },
6935                 };
6936                 Ok(Self {
6937                         prev_hop: prev_hop.0.unwrap(),
6938                         timer_ticks: 0,
6939                         value,
6940                         sender_intended_value: sender_intended_value.unwrap_or(value),
6941                         total_value_received,
6942                         total_msat: total_msat.unwrap(),
6943                         onion_payload,
6944                         cltv_expiry,
6945                 })
6946         }
6947 }
6948
6949 impl Readable for HTLCSource {
6950         fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
6951                 let id: u8 = Readable::read(reader)?;
6952                 match id {
6953                         0 => {
6954                                 let mut session_priv: crate::util::ser::RequiredWrapper<SecretKey> = crate::util::ser::RequiredWrapper(None);
6955                                 let mut first_hop_htlc_msat: u64 = 0;
6956                                 let mut path: Option<Vec<RouteHop>> = Some(Vec::new());
6957                                 let mut payment_id = None;
6958                                 let mut payment_params: Option<PaymentParameters> = None;
6959                                 read_tlv_fields!(reader, {
6960                                         (0, session_priv, required),
6961                                         (1, payment_id, option),
6962                                         (2, first_hop_htlc_msat, required),
6963                                         (4, path, vec_type),
6964                                         (5, payment_params, (option: ReadableArgs, 0)),
6965                                 });
6966                                 if payment_id.is_none() {
6967                                         // For backwards compat, if there was no payment_id written, use the session_priv bytes
6968                                         // instead.
6969                                         payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
6970                                 }
6971                                 if path.is_none() || path.as_ref().unwrap().is_empty() {
6972                                         return Err(DecodeError::InvalidValue);
6973                                 }
6974                                 let path = path.unwrap();
6975                                 if let Some(params) = payment_params.as_mut() {
6976                                         if params.final_cltv_expiry_delta == 0 {
6977                                                 params.final_cltv_expiry_delta = path.last().unwrap().cltv_expiry_delta;
6978                                         }
6979                                 }
6980                                 Ok(HTLCSource::OutboundRoute {
6981                                         session_priv: session_priv.0.unwrap(),
6982                                         first_hop_htlc_msat,
6983                                         path,
6984                                         payment_id: payment_id.unwrap(),
6985                                 })
6986                         }
6987                         1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
6988                         _ => Err(DecodeError::UnknownRequiredFeature),
6989                 }
6990         }
6991 }
6992
6993 impl Writeable for HTLCSource {
6994         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), crate::io::Error> {
6995                 match self {
6996                         HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id } => {
6997                                 0u8.write(writer)?;
6998                                 let payment_id_opt = Some(payment_id);
6999                                 write_tlv_fields!(writer, {
7000                                         (0, session_priv, required),
7001                                         (1, payment_id_opt, option),
7002                                         (2, first_hop_htlc_msat, required),
7003                                         // 3 was previously used to write a PaymentSecret for the payment.
7004                                         (4, *path, vec_type),
7005                                         (5, None::<PaymentParameters>, option), // payment_params in LDK versions prior to 0.0.115
7006                                 });
7007                         }
7008                         HTLCSource::PreviousHopData(ref field) => {
7009                                 1u8.write(writer)?;
7010                                 field.write(writer)?;
7011                         }
7012                 }
7013                 Ok(())
7014         }
7015 }
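
// The TLV maps above rely on LDK's even/odd TLV-type convention (mirroring BOLT 1's
// "it's ok to be odd" rule): odd-numbered fields may be skipped by readers which do
// not understand them, while an unknown even-numbered field must abort the read. A
// minimal, illustrative sketch of that policy follows (hedged: this is not the
// actual `read_tlv_fields!` machinery, which lives in `util::ser_macros`):
#[allow(dead_code)]
fn unknown_tlv_type_policy_sketch(type_num: u64) -> Result<(), DecodeError> {
        if type_num % 2 == 1 {
                // Odd: optional; skip the value and keep reading the stream.
                Ok(())
        } else {
                // Even: required-but-unknown; the stream cannot be safely interpreted.
                Err(DecodeError::UnknownRequiredFeature)
        }
}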
7016
7017 impl_writeable_tlv_based!(PendingAddHTLCInfo, {
7018         (0, forward_info, required),
7019         (1, prev_user_channel_id, (default_value, 0)),
7020         (2, prev_short_channel_id, required),
7021         (4, prev_htlc_id, required),
7022         (6, prev_funding_outpoint, required),
7023 });
7024
7025 impl_writeable_tlv_based_enum!(HTLCForwardInfo,
7026         (1, FailHTLC) => {
7027                 (0, htlc_id, required),
7028                 (2, err_packet, required),
7029         };
7030         (0, AddHTLC)
7031 );
7032
7033 impl_writeable_tlv_based!(PendingInboundPayment, {
7034         (0, payment_secret, required),
7035         (2, expiry_time, required),
7036         (4, user_payment_id, required),
7037         (6, payment_preimage, required),
7038         (8, min_value_msat, required),
7039 });
7040
7041 impl<M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref> Writeable for ChannelManager<M, T, ES, NS, SP, F, R, L>
7042 where
7043         M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
7044         T::Target: BroadcasterInterface,
7045         ES::Target: EntropySource,
7046         NS::Target: NodeSigner,
7047         SP::Target: SignerProvider,
7048         F::Target: FeeEstimator,
7049         R::Target: Router,
7050         L::Target: Logger,
7051 {
7052         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7053                 let _consistency_lock = self.total_consistency_lock.write().unwrap();
7054
7055                 write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7056
7057                 self.genesis_hash.write(writer)?;
7058                 {
7059                         let best_block = self.best_block.read().unwrap();
7060                         best_block.height().write(writer)?;
7061                         best_block.block_hash().write(writer)?;
7062                 }
7063
7064                 let mut serializable_peer_count: u64 = 0;
7065                 {
7066                         let per_peer_state = self.per_peer_state.read().unwrap();
7067                         let mut unfunded_channels = 0;
7068                         let mut number_of_channels = 0;
7069                         for (_, peer_state_mutex) in per_peer_state.iter() {
7070                                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
7071                                 let peer_state = &mut *peer_state_lock;
7072                                 if !peer_state.ok_to_remove(false) {
7073                                         serializable_peer_count += 1;
7074                                 }
7075                                 number_of_channels += peer_state.channel_by_id.len();
7076                                 for (_, channel) in peer_state.channel_by_id.iter() {
7077                                         if !channel.is_funding_initiated() {
7078                                                 unfunded_channels += 1;
7079                                         }
7080                                 }
7081                         }
7082
7083                         ((number_of_channels - unfunded_channels) as u64).write(writer)?;
7084
7085                         for (_, peer_state_mutex) in per_peer_state.iter() {
7086                                 let mut peer_state_lock = peer_state_mutex.lock().unwrap();
7087                                 let peer_state = &mut *peer_state_lock;
7088                                 for (_, channel) in peer_state.channel_by_id.iter() {
7089                                         if channel.is_funding_initiated() {
7090                                                 channel.write(writer)?;
7091                                         }
7092                                 }
7093                         }
7094                 }
7095
7096                 {
7097                         let forward_htlcs = self.forward_htlcs.lock().unwrap();
7098                         (forward_htlcs.len() as u64).write(writer)?;
7099                         for (short_channel_id, pending_forwards) in forward_htlcs.iter() {
7100                                 short_channel_id.write(writer)?;
7101                                 (pending_forwards.len() as u64).write(writer)?;
7102                                 for forward in pending_forwards {
7103                                         forward.write(writer)?;
7104                                 }
7105                         }
7106                 }
7107
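                // Take the `per_peer_state` write lock so the peer set cannot change for the
                // rest of serialization; holding it exclusively is also what makes the
                // per-peer double-lock below sound.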
7108                 let per_peer_state = self.per_peer_state.write().unwrap();
7109
7110                 let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
7111                 let claimable_payments = self.claimable_payments.lock().unwrap();
7112                 let pending_outbound_payments = self.pending_outbound_payments.pending_outbound_payments.lock().unwrap();
7113
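                // Write the claimable HTLCs inline (readable by all prior versions), while
                // collecting each payment's `PaymentPurpose` to be written below as odd TLV
                // type 9, which older versions simply ignore.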
7114                 let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
7115                 (claimable_payments.claimable_htlcs.len() as u64).write(writer)?;
7116                 for (payment_hash, (purpose, previous_hops)) in claimable_payments.claimable_htlcs.iter() {
7117                         payment_hash.write(writer)?;
7118                         (previous_hops.len() as u64).write(writer)?;
7119                         for htlc in previous_hops.iter() {
7120                                 htlc.write(writer)?;
7121                         }
7122                         htlc_purposes.push(purpose);
7123                 }
7124
7125                 let mut monitor_update_blocked_actions_per_peer = None;
7126                 let mut peer_states = Vec::new();
7127                 for (_, peer_state_mutex) in per_peer_state.iter() {
7128                         // Because we're holding the owning `per_peer_state` write lock here there's no chance
7129                         // of a lockorder violation deadlock - no other thread can be holding any
7130                         // per_peer_state lock at all.
7131                         peer_states.push(peer_state_mutex.unsafe_well_ordered_double_lock_self());
7132                 }
7133
7134                 (serializable_peer_count).write(writer)?;
7135                 for ((peer_pubkey, _), peer_state) in per_peer_state.iter().zip(peer_states.iter()) {
7136                         // Peers which we have no channels to should be dropped once disconnected. As we
7137                         // disconnect all peers when shutting down and serializing the ChannelManager, we
7138                         // consider all peers as disconnected here. There's therefore no need to write peers with
7139                         // no channels.
7140                         if !peer_state.ok_to_remove(false) {
7141                                 peer_pubkey.write(writer)?;
7142                                 peer_state.latest_features.write(writer)?;
7143                                 if !peer_state.monitor_update_blocked_actions.is_empty() {
7144                                         monitor_update_blocked_actions_per_peer
7145                                                 .get_or_insert_with(Vec::new)
7146                                                 .push((*peer_pubkey, &peer_state.monitor_update_blocked_actions));
7147                                 }
7148                         }
7149                 }
7150
7151                 let events = self.pending_events.lock().unwrap();
7152                 (events.len() as u64).write(writer)?;
7153                 for event in events.iter() {
7154                         event.write(writer)?;
7155                 }
7156
7157                 let background_events = self.pending_background_events.lock().unwrap();
7158                 (background_events.len() as u64).write(writer)?;
7159                 for event in background_events.iter() {
7160                         match event {
7161                                 BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
7162                                         0u8.write(writer)?;
7163                                         funding_txo.write(writer)?;
7164                                         monitor_update.write(writer)?;
7165                                 },
7166                         }
7167                 }
7168
7169                 // Prior to 0.0.111 we tracked node_announcement serials here; however, that now happens in
7170                 // `PeerManager`, so we simply write the `highest_seen_timestamp` twice, which is
7171                 // likely to be identical.
7172                 (self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
7173                 (self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
7174
7175                 (pending_inbound_payments.len() as u64).write(writer)?;
7176                 for (hash, pending_payment) in pending_inbound_payments.iter() {
7177                         hash.write(writer)?;
7178                         pending_payment.write(writer)?;
7179                 }
7180
7181                 // For backwards compat, write the session privs and their total length.
7182                 let mut num_pending_outbounds_compat: u64 = 0;
7183                 for (_, outbound) in pending_outbound_payments.iter() {
7184                         if !outbound.is_fulfilled() && !outbound.abandoned() {
7185                                 num_pending_outbounds_compat += outbound.remaining_parts() as u64;
7186                         }
7187                 }
7188                 num_pending_outbounds_compat.write(writer)?;
7189                 for (_, outbound) in pending_outbound_payments.iter() {
7190                         match outbound {
7191                                 PendingOutboundPayment::Legacy { session_privs } |
7192                                 PendingOutboundPayment::Retryable { session_privs, .. } => {
7193                                         for session_priv in session_privs.iter() {
7194                                                 session_priv.write(writer)?;
7195                                         }
7196                                 }
7197                                 PendingOutboundPayment::Fulfilled { .. } => {},
7198                                 PendingOutboundPayment::Abandoned { .. } => {},
7199                         }
7200                 }
7201
7202                 // Encode without retry info for 0.0.101 compatibility.
7203                 let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
7204                 for (id, outbound) in pending_outbound_payments.iter() {
7205                         match outbound {
7206                                 PendingOutboundPayment::Legacy { session_privs } |
7207                                 PendingOutboundPayment::Retryable { session_privs, .. } => {
7208                                         pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
7209                                 },
7210                                 _ => {},
7211                         }
7212                 }
7213
7214                 let mut pending_intercepted_htlcs = None;
7215                 let our_pending_intercepts = self.pending_intercepted_htlcs.lock().unwrap();
7216                 if !our_pending_intercepts.is_empty() {
7217                         pending_intercepted_htlcs = Some(our_pending_intercepts);
7218                 }
7219
7220                 let mut pending_claiming_payments = Some(&claimable_payments.pending_claiming_payments);
7221                 if pending_claiming_payments.as_ref().unwrap().is_empty() {
7222                         // LDK versions prior to 0.0.113 do not know how to read the pending claimed payments
7223                         // map. Thus, if there are no entries we skip writing a TLV for it.
7224                         pending_claiming_payments = None;
7225                 }
7226
7227                 write_tlv_fields!(writer, {
7228                         (1, pending_outbound_payments_no_retry, required),
7229                         (2, pending_intercepted_htlcs, option),
7230                         (3, pending_outbound_payments, required),
7231                         (4, pending_claiming_payments, option),
7232                         (5, self.our_network_pubkey, required),
7233                         (6, monitor_update_blocked_actions_per_peer, option),
7234                         (7, self.fake_scid_rand_bytes, required),
7235                         (9, htlc_purposes, vec_type),
7236                         (11, self.probing_cookie_secret, required),
7237                 });
7238
7239                 Ok(())
7240         }
7241 }
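
// A hedged sketch of how a running `ChannelManager` snapshot might be taken via the
// `Writeable` impl above. Real deployments would typically let a `Persister` or the
// background processor drive persistence; the helper below is illustrative only.
#[allow(dead_code)]
fn snapshot_channel_manager_sketch<CM: Writeable>(manager: &CM) -> Vec<u8> {
        // `encode` is `Writeable`'s provided helper; for `ChannelManager` this yields a
        // consistent snapshot because `write` holds the `total_consistency_lock`.
        manager.encode()
}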
7242
7243 /// Arguments for the creation of a ChannelManager that are not deserialized.
7244 ///
7245 /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
7246 /// is:
7247 /// 1) Deserialize all stored [`ChannelMonitor`]s.
7248 /// 2) Deserialize the [`ChannelManager`] by filling in this struct and calling:
7249 ///    `<(BlockHash, ChannelManager)>::read(reader, args)`
7250 ///    This may result in closing some channels if the [`ChannelMonitor`] is newer than the stored
7251 ///    [`ChannelManager`] state, to ensure no loss of funds. Thus, transactions may be broadcast.
7252 /// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
7253 ///    same way you would handle a [`chain::Filter`] call using
7254 ///    [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
7255 /// 4) Reconnect blocks on your [`ChannelMonitor`]s.
7256 /// 5) Disconnect/connect blocks on the [`ChannelManager`].
7257 /// 6) Re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
7258 ///    Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
7259 ///    will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
7260 ///    the next step.
7261 /// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a
7262 ///    [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`].
7263 ///
7264 /// Note that steps #4-7 may occur in any order; however, all four must complete before you
7265 /// call any other methods on the newly-deserialized [`ChannelManager`].
7266 ///
7267 /// Note that because some channels may be closed during deserialization, it is critical that you
7268 /// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
7269 /// you. If you deserialize an old ChannelManager (during which force-closure transactions may be
7270 /// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
7271 /// not force-close the same channels but consider them live), you may end up revoking a state for
7272 /// which you've already broadcast the transaction.
7273 ///
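/// A rough sketch of this flow for a [`ChainMonitor`]-based setup (hedged, illustrative
/// pseudocode: `read_monitors` and `manager_bytes` stand in for your persistence layer
/// and are not LDK APIs):
///
/// ```ignore
/// // 1) Deserialize all stored ChannelMonitors from your storage.
/// let mut monitors: Vec<ChannelMonitor<_>> = read_monitors()?;
/// // 2) Deserialize the ChannelManager, handing it mutable monitor references.
/// let read_args = ChannelManagerReadArgs::new(&keys_manager, &keys_manager, &keys_manager,
///     &fee_estimator, &chain_monitor, &tx_broadcaster, &router, &logger, default_config,
///     monitors.iter_mut().collect());
/// let (block_hash, manager) =
///     <(BlockHash, ChannelManager<_, _, _, _, _, _, _, _>)>::read(&mut &manager_bytes[..], read_args)?;
/// // 3)-6) Register outpoints / replay chain data as described above, then:
/// for monitor in monitors.drain(..) {
///     // 7) Hand each monitor to your chain::Watch implementation.
///     chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
/// }
/// ```
///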
7274 /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
7275 pub struct ChannelManagerReadArgs<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
7276 where
7277         M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
7278         T::Target: BroadcasterInterface,
7279         ES::Target: EntropySource,
7280         NS::Target: NodeSigner,
7281         SP::Target: SignerProvider,
7282         F::Target: FeeEstimator,
7283         R::Target: Router,
7284         L::Target: Logger,
7285 {
7286         /// A cryptographically secure source of entropy.
7287         pub entropy_source: ES,
7288
7289         /// A signer that is able to perform node-scoped cryptographic operations.
7290         pub node_signer: NS,
7291
7292         /// The keys provider which will give us relevant keys. Some keys will be loaded during
7293         /// deserialization and SignerProvider::read_chan_signer will be used to read per-Channel
7294         /// signing data.
7295         pub signer_provider: SP,
7296
7297         /// The fee_estimator for use in the ChannelManager in the future.
7298         ///
7299         /// No calls to the FeeEstimator will be made during deserialization.
7300         pub fee_estimator: F,
7301         /// The chain::Watch for use in the ChannelManager in the future.
7302         ///
7303         /// No calls to the chain::Watch will be made during deserialization. It is assumed that
7304         /// you have deserialized ChannelMonitors separately and will add them to your
7305         /// chain::Watch after deserializing this ChannelManager.
7306         pub chain_monitor: M,
7307
7308         /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
7309         /// used to broadcast the latest local commitment transactions of channels which must be
7310         /// force-closed during deserialization.
7311         pub tx_broadcaster: T,
7312         /// The router which will be used in the ChannelManager in the future for finding routes
7313         /// on-the-fly for trampoline payments. Absent in private nodes that don't support forwarding.
7314         ///
7315         /// No calls to the router will be made during deserialization.
7316         pub router: R,
7317         /// The Logger for use in the ChannelManager and which may be used to log information during
7318         /// deserialization.
7319         pub logger: L,
7320         /// Default settings used for new channels. Any existing channels will continue to use the
7321         /// runtime settings which were stored when the ChannelManager was serialized.
7322         pub default_config: UserConfig,
7323
7324         /// A map from channel funding outpoints to ChannelMonitors for those channels (ie
7325         /// value.get_funding_txo() should be the key).
7326         ///
7327         /// If a monitor is inconsistent with the channel state during deserialization, the channel
7328         /// will be force-closed using the data in the ChannelMonitor and the channel will be dropped.
7329         /// This is true for missing channels as well. If a monitor is missing for which we find
7330         /// channel data, Err(DecodeError::InvalidValue) will be returned.
7331         ///
7332         /// In such cases the latest local transactions will be sent to the tx_broadcaster included in
7333         /// this struct.
7334         ///
7335         /// This is not exported to bindings users because we have no HashMap bindings
7336         pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<<SP::Target as SignerProvider>::Signer>>,
7337 }
7338
7339 impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
7340                 ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>
7341 where
7342         M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
7343         T::Target: BroadcasterInterface,
7344         ES::Target: EntropySource,
7345         NS::Target: NodeSigner,
7346         SP::Target: SignerProvider,
7347         F::Target: FeeEstimator,
7348         R::Target: Router,
7349         L::Target: Logger,
7350 {
7351         /// Simple utility function to create a ChannelManagerReadArgs which creates the monitor
7352         /// HashMap for you. This is primarily useful for C bindings where it is not practical to
7353         /// populate a HashMap directly from C.
7354         pub fn new(entropy_source: ES, node_signer: NS, signer_provider: SP, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, router: R, logger: L, default_config: UserConfig,
7355                         mut channel_monitors: Vec<&'a mut ChannelMonitor<<SP::Target as SignerProvider>::Signer>>) -> Self {
7356                 Self {
7357                         entropy_source, node_signer, signer_provider, fee_estimator, chain_monitor, tx_broadcaster, router, logger, default_config,
7358                         channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
7359                 }
7360         }
7361 }
7362
7363 // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
7364 // SimpleArcChannelManager type:
7365 impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
7366         ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>> for (BlockHash, Arc<ChannelManager<M, T, ES, NS, SP, F, R, L>>)
7367 where
7368         M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
7369         T::Target: BroadcasterInterface,
7370         ES::Target: EntropySource,
7371         NS::Target: NodeSigner,
7372         SP::Target: SignerProvider,
7373         F::Target: FeeEstimator,
7374         R::Target: Router,
7375         L::Target: Logger,
7376 {
7377         fn read<Reader: io::Read>(reader: &mut Reader, args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result<Self, DecodeError> {
7378                 let (blockhash, chan_manager) = <(BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, L>)>::read(reader, args)?;
7379                 Ok((blockhash, Arc::new(chan_manager)))
7380         }
7381 }
7382
7383 impl<'a, M: Deref, T: Deref, ES: Deref, NS: Deref, SP: Deref, F: Deref, R: Deref, L: Deref>
7384         ReadableArgs<ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>> for (BlockHash, ChannelManager<M, T, ES, NS, SP, F, R, L>)
7385 where
7386         M::Target: chain::Watch<<SP::Target as SignerProvider>::Signer>,
7387         T::Target: BroadcasterInterface,
7388         ES::Target: EntropySource,
7389         NS::Target: NodeSigner,
7390         SP::Target: SignerProvider,
7391         F::Target: FeeEstimator,
7392         R::Target: Router,
7393         L::Target: Logger,
7394 {
7395         fn read<Reader: io::Read>(reader: &mut Reader, mut args: ChannelManagerReadArgs<'a, M, T, ES, NS, SP, F, R, L>) -> Result<Self, DecodeError> {
7396                 let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7397
7398                 let genesis_hash: BlockHash = Readable::read(reader)?;
7399                 let best_block_height: u32 = Readable::read(reader)?;
7400                 let best_block_hash: BlockHash = Readable::read(reader)?;
7401
7402                 let mut failed_htlcs = Vec::new();
7403
7404                 let channel_count: u64 = Readable::read(reader)?;
7405                 let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
7406                 let mut peer_channels: HashMap<PublicKey, HashMap<[u8; 32], Channel<<SP::Target as SignerProvider>::Signer>>> = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
7407                 let mut id_to_peer = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
7408                 let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
7409                 let mut channel_closures = Vec::new();
7410                 let mut pending_background_events = Vec::new();
7411                 for _ in 0..channel_count {
7412                         let mut channel: Channel<<SP::Target as SignerProvider>::Signer> = Channel::read(reader, (
7413                                 &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config)
7414                         ))?;
7415                         let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
7416                         funding_txo_set.insert(funding_txo.clone());
7417                         if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
7418                                 if channel.get_cur_holder_commitment_transaction_number() < monitor.get_cur_holder_commitment_number() ||
7419                                                 channel.get_revoked_counterparty_commitment_transaction_number() < monitor.get_min_seen_secret() ||
7420                                                 channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() ||
7421                                                 channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
7422                                         // If the channel is ahead of the monitor, return InvalidValue:
7423                                         log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
7424                                         log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
7425                                                 log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
7426                                         log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
7427                                         log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
7428                                         log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
7429                                         log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
7430                                         return Err(DecodeError::InvalidValue);
7431                                 } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
7432                                                 channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
7433                                                 channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
7434                                                 channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
7435                                         // But if the channel is behind the monitor, close the channel:
7436                                         log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
7437                                         log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
7438                                         log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
7439                                                 log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
7440                                         let (monitor_update, mut new_failed_htlcs) = channel.force_shutdown(true);
7441                                         if let Some(monitor_update) = monitor_update {
7442                                                 pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate(monitor_update));
7443                                         }
7444                                         failed_htlcs.append(&mut new_failed_htlcs);
7445                                         channel_closures.push(events::Event::ChannelClosed {
7446                                                 channel_id: channel.channel_id(),
7447                                                 user_channel_id: channel.get_user_id(),
7448                                                 reason: ClosureReason::OutdatedChannelManager
7449                                         });
7450                                         for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
7451                                                 let mut found_htlc = false;
7452                                                 for (monitor_htlc_source, _) in monitor.get_all_current_outbound_htlcs() {
7453                                                         if *channel_htlc_source == monitor_htlc_source { found_htlc = true; break; }
7454                                                 }
7455                                                 if !found_htlc {
7456                                                         // If we have some HTLCs in the channel which are not present in the newer
7457                                                         // ChannelMonitor, they have been removed and should be failed back to
7458                                                         // ensure we don't forget them entirely. Note that if the missing HTLC(s)
7459                                                         // were actually claimed we'd have generated and ensured the previous-hop
7460                                                         // claim update ChannelMonitor updates were persisted prior to persisting
7461                                                         // the ChannelMonitor update for the forward leg, so attempting to fail the
7462                                                         // backwards leg of the HTLC will simply be rejected.
7463                                                         log_info!(args.logger,
7464                                                                 "Failing HTLC with hash {} as it is missing in the ChannelMonitor for channel {} but was present in the (stale) ChannelManager",
7465                                                                 log_bytes!(payment_hash.0), log_bytes!(channel.channel_id()));
7466                                                         failed_htlcs.push((channel_htlc_source.clone(), *payment_hash, channel.get_counterparty_node_id(), channel.channel_id()));
7467                                                 }
7468                                         }
7469                                 } else {
7470                                         log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
7471                                         if let Some(short_channel_id) = channel.get_short_channel_id() {
7472                                                 short_to_chan_info.insert(short_channel_id, (channel.get_counterparty_node_id(), channel.channel_id()));
7473                                         }
7474                                         if channel.is_funding_initiated() {
7475                                                 id_to_peer.insert(channel.channel_id(), channel.get_counterparty_node_id());
7476                                         }
7477                                         match peer_channels.entry(channel.get_counterparty_node_id()) {
7478                                                 hash_map::Entry::Occupied(mut entry) => {
7479                                                         let by_id_map = entry.get_mut();
7480                                                         by_id_map.insert(channel.channel_id(), channel);
7481                                                 },
7482                                                 hash_map::Entry::Vacant(entry) => {
7483                                                         let mut by_id_map = HashMap::new();
7484                                                         by_id_map.insert(channel.channel_id(), channel);
7485                                                         entry.insert(by_id_map);
7486                                                 }
7487                                         }
7488                                 }
7489                         } else if channel.is_awaiting_initial_mon_persist() {
7490                                 // If we were persisted and shut down while the initial ChannelMonitor persistence
7491                                 // was in-progress, we never broadcast the funding transaction and can still
7492                                 // safely discard the channel.
7493                                 let _ = channel.force_shutdown(false);
7494                                 channel_closures.push(events::Event::ChannelClosed {
7495                                         channel_id: channel.channel_id(),
7496                                         user_channel_id: channel.get_user_id(),
7497                                         reason: ClosureReason::DisconnectedPeer,
7498                                 });
7499                         } else {
7500                                 log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
7501                                 log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
7502                                 log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
7503                                 log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
7504                                 log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
7505                                 return Err(DecodeError::InvalidValue);
7506                         }
7507                 }
7508
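                // Any monitor without a corresponding channel above belongs to a channel the
                // manager no longer knows about (it was closed, or never made it into this
                // serialization). Queue an update marking it closed and have it broadcast its
                // latest state, just in case.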
7509                 for (funding_txo, _) in args.channel_monitors.iter() {
7510                         if !funding_txo_set.contains(funding_txo) {
7511                                 let monitor_update = ChannelMonitorUpdate {
7512                                         update_id: CLOSED_CHANNEL_UPDATE_ID,
7513                                         updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }],
7514                                 };
7515                                 pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((*funding_txo, monitor_update)));
7516                         }
7517                 }
7518
7519                 const MAX_ALLOC_SIZE: usize = 1024 * 64;
7520                 let forward_htlcs_count: u64 = Readable::read(reader)?;
7521                 let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
7522                 for _ in 0..forward_htlcs_count {
7523                         let short_channel_id = Readable::read(reader)?;
7524                         let pending_forwards_count: u64 = Readable::read(reader)?;
7525                         let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, MAX_ALLOC_SIZE/mem::size_of::<HTLCForwardInfo>()));
7526                         for _ in 0..pending_forwards_count {
7527                                 pending_forwards.push(Readable::read(reader)?);
7528                         }
7529                         forward_htlcs.insert(short_channel_id, pending_forwards);
7530                 }
7531
7532                 let claimable_htlcs_count: u64 = Readable::read(reader)?;
7533                 let mut claimable_htlcs_list = Vec::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
7534                 for _ in 0..claimable_htlcs_count {
7535                         let payment_hash = Readable::read(reader)?;
7536                         let previous_hops_len: u64 = Readable::read(reader)?;
7537                         let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::<ClaimableHTLC>()));
7538                         for _ in 0..previous_hops_len {
7539                                 previous_hops.push(<ClaimableHTLC as Readable>::read(reader)?);
7540                         }
7541                         claimable_htlcs_list.push((payment_hash, previous_hops));
7542                 }
7543
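                // Rebuild the per-peer state, handing each serialized peer the channels we
                // deserialized for it above.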
7544                 let peer_count: u64 = Readable::read(reader)?;
7545                 let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState<<SP::Target as SignerProvider>::Signer>>)>()));
7546                 for _ in 0..peer_count {
7547                         let peer_pubkey = Readable::read(reader)?;
7548                         let peer_state = PeerState {
7549                                 channel_by_id: peer_channels.remove(&peer_pubkey).unwrap_or(HashMap::new()),
7550                                 latest_features: Readable::read(reader)?,
7551                                 pending_msg_events: Vec::new(),
7552                                 monitor_update_blocked_actions: BTreeMap::new(),
7553                                 is_connected: false,
7554                         };
7555                         per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
7556                 }
7557
7558                 let event_count: u64 = Readable::read(reader)?;
7559                 let mut pending_events_read: Vec<events::Event> = Vec::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<events::Event>()));
7560                 for _ in 0..event_count {
7561                         match MaybeReadable::read(reader)? {
7562                                 Some(event) => pending_events_read.push(event),
7563                                 None => continue,
7564                         }
7565                 }
7566
7567                 let background_event_count: u64 = Readable::read(reader)?;
7568                 for _ in 0..background_event_count {
7569                         match <u8 as Readable>::read(reader)? {
7570                                 0 => {
7571                                         let (funding_txo, monitor_update): (OutPoint, ChannelMonitorUpdate) = (Readable::read(reader)?, Readable::read(reader)?);
7572                                         if !pending_background_events.iter().any(|e| {
7573                                                 let BackgroundEvent::ClosingMonitorUpdate((pending_funding_txo, pending_monitor_update)) = e;
7574                                                 *pending_funding_txo == funding_txo && *pending_monitor_update == monitor_update
7575                                         }) {
7576                                                 pending_background_events.push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)));
7577                                         }
7578                                 }
7579                                 _ => return Err(DecodeError::InvalidValue),
7580                         }
7581                 }
7582
7583                 let _last_node_announcement_serial: u32 = Readable::read(reader)?; // Only used < 0.0.111
7584                 let highest_seen_timestamp: u32 = Readable::read(reader)?;
7585
7586                 let pending_inbound_payment_count: u64 = Readable::read(reader)?;
7587                 let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
7588                 for _ in 0..pending_inbound_payment_count {
7589                         if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() {
7590                                 return Err(DecodeError::InvalidValue);
7591                         }
7592                 }
7593
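                // Read back the legacy (pre-TLV) list of outbound session privs written for
                // backwards compatibility; it is superseded below if newer TLV data is present.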
7594                 let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
7595                 let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
7596                         HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
7597                 for _ in 0..pending_outbound_payments_count_compat {
7598                         let session_priv = Readable::read(reader)?;
7599                         let payment = PendingOutboundPayment::Legacy {
7600                                 session_privs: [session_priv].iter().cloned().collect()
7601                         };
7602                         if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
7603                                 return Err(DecodeError::InvalidValue)
7604                         };
7605                 }
7606
7607                 // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
7608                 let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
7609                 let mut pending_outbound_payments = None;
7610                 let mut pending_intercepted_htlcs: Option<HashMap<InterceptId, PendingAddHTLCInfo>> = Some(HashMap::new());
7611                 let mut received_network_pubkey: Option<PublicKey> = None;
7612                 let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
7613                 let mut probing_cookie_secret: Option<[u8; 32]> = None;
7614                 let mut claimable_htlc_purposes = None;
7615                 let mut pending_claiming_payments = Some(HashMap::new());
7616                 let mut monitor_update_blocked_actions_per_peer = Some(Vec::new());
7617                 read_tlv_fields!(reader, {
7618                         (1, pending_outbound_payments_no_retry, option),
7619                         (2, pending_intercepted_htlcs, option),
7620                         (3, pending_outbound_payments, option),
7621                         (4, pending_claiming_payments, option),
7622                         (5, received_network_pubkey, option),
7623                         (6, monitor_update_blocked_actions_per_peer, option),
7624                         (7, fake_scid_rand_bytes, option),
7625                         (9, claimable_htlc_purposes, vec_type),
7626                         (11, probing_cookie_secret, option),
7627                 });
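                // Older versions never wrote (or used) these secrets, so if they are absent
                // nothing can depend on them yet; generate fresh random values.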
7628                 if fake_scid_rand_bytes.is_none() {
7629                         fake_scid_rand_bytes = Some(args.entropy_source.get_secure_random_bytes());
7630                 }
7631
7632                 if probing_cookie_secret.is_none() {
7633                         probing_cookie_secret = Some(args.entropy_source.get_secure_random_bytes());
7634                 }
7635
7636                 if !channel_closures.is_empty() {
7637                         pending_events_read.append(&mut channel_closures);
7638                 }
7639
7640                 if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
7641                         pending_outbound_payments = Some(pending_outbound_payments_compat);
7642                 } else if pending_outbound_payments.is_none() {
7643                         let mut outbounds = HashMap::new();
7644                         for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
7645                                 outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
7646                         }
7647                         pending_outbound_payments = Some(outbounds);
7648                 }
7649                 let pending_outbounds = OutboundPayments {
7650                         pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
7651                         retry_lock: Mutex::new(())
7652                 };
7653
7654                 {
7655                         // If we're tracking pending payments, ensure we haven't lost any by looking at the
7656                         // ChannelMonitor data for any channels for which we do not have authoritative state
7657                         // (i.e. those which we just force-closed above or for which we otherwise don't have
7658                         // a corresponding `Channel` at all).
7659                         // This avoids several edge-cases where we would otherwise "forget" about pending
7660                         // payments which are still in-flight via their on-chain state.
7661                         // We only rebuild the pending payments map if we were most recently serialized by
7662                         // 0.0.102+
7663                         for (_, monitor) in args.channel_monitors.iter() {
7664                                 if id_to_peer.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
7665                                         for (htlc_source, (htlc, _)) in monitor.get_pending_or_resolved_outbound_htlcs() {
7666                                                 if let HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } = htlc_source {
7667                                                         if path.is_empty() {
7668                                                                 log_error!(args.logger, "Got an empty path for a pending payment");
7669                                                                 return Err(DecodeError::InvalidValue);
7670                                                         }
7671
7672                                                         let path_amt = path.last().unwrap().fee_msat;
7673                                                         let mut session_priv_bytes = [0; 32];
7674                                                         session_priv_bytes[..].copy_from_slice(&session_priv[..]);
7675                                                         match pending_outbounds.pending_outbound_payments.lock().unwrap().entry(payment_id) {
7676                                                                 hash_map::Entry::Occupied(mut entry) => {
7677                                                                         let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
7678                                                                         log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
7679                                                                                 if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), log_bytes!(htlc.payment_hash.0));
7680                                                                 },
7681                                                                 hash_map::Entry::Vacant(entry) => {
7682                                                                         let path_fee = path.get_path_fees();
7683                                                                         entry.insert(PendingOutboundPayment::Retryable {
7684                                                                                 retry_strategy: None,
7685                                                                                 attempts: PaymentAttempts::new(),
7686                                                                                 payment_params: None,
7687                                                                                 session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
7688                                                                                 payment_hash: htlc.payment_hash,
7689                                                                                 payment_secret: None, // only used for retries, and we'll never retry on startup
7690                                                                                 keysend_preimage: None, // only used for retries, and we'll never retry on startup
7691                                                                                 pending_amt_msat: path_amt,
7692                                                                                 pending_fee_msat: Some(path_fee),
7693                                                                                 total_msat: path_amt,
7694                                                                                 starting_block_height: best_block_height,
7695                                                                         });
7696                                                                         log_info!(args.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
7697                                                                                 path_amt, log_bytes!(htlc.payment_hash.0), log_bytes!(session_priv_bytes));
7698                                                                 }
7699                                                         }
7700                                                 }
7701                                         }
7702                                         for (htlc_source, (htlc, preimage_opt)) in monitor.get_all_current_outbound_htlcs() {
7703                                                 match htlc_source {
7704                                                         HTLCSource::PreviousHopData(prev_hop_data) => {
7705                                                                 let pending_forward_matches_htlc = |info: &PendingAddHTLCInfo| {
7706                                                                         info.prev_funding_outpoint == prev_hop_data.outpoint &&
7707                                                                                 info.prev_htlc_id == prev_hop_data.htlc_id
7708                                                                 };
7709                                                                 // The ChannelMonitor is now responsible for this HTLC's
7710                                                                 // failure/success and will let us know what its outcome is. If we
7711                                                                 // still have an entry for this HTLC in `forward_htlcs` or
7712                                                                 // `pending_intercepted_htlcs`, we apparently were not persisted after
7713                                                                 // the monitor was persisted when forwarding the payment.
7714                                                                 forward_htlcs.retain(|_, forwards| {
7715                                                                         forwards.retain(|forward| {
7716                                                                                 if let HTLCForwardInfo::AddHTLC(htlc_info) = forward {
7717                                                                                         if pending_forward_matches_htlc(&htlc_info) {
7718                                                                                                 log_info!(args.logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}",
7719                                                                                                         log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
7720                                                                                                 false
7721                                                                                         } else { true }
7722                                                                                 } else { true }
7723                                                                         });
7724                                                                         !forwards.is_empty()
7725                                                                 });
7726                                                                 pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| {
7727                                                                         if pending_forward_matches_htlc(&htlc_info) {
7728                                                                                 log_info!(args.logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}",
7729                                                                                         log_bytes!(htlc.payment_hash.0), log_bytes!(monitor.get_funding_txo().0.to_channel_id()));
7730                                                                                 pending_events_read.retain(|event| {
7731                                                                                         if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event {
7732                                                                                                 intercepted_id != ev_id
7733                                                                                         } else { true }
7734                                                                                 });
7735                                                                                 false
7736                                                                         } else { true }
7737                                                                 });
7738                                                         },
7739                                                         HTLCSource::OutboundRoute { payment_id, session_priv, path, .. } => {
7740                                                                 if let Some(preimage) = preimage_opt {
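										// `claim_htlc` takes a `&Mutex<Vec<Event>>`, so temporarily wrap our
										// local event buffer in a `Mutex` and unwrap it again once the claim
										// has been queued.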
7741                                                                         let pending_events = Mutex::new(pending_events_read);
7742                                                                         // Note that we set `from_onchain` to "false" here,
7743                                                                         // deliberately keeping the pending payment around forever.
7744 										// Given this should only occur when we have a channel we're
7745 										// force-closing for being stale, that's okay.
7746                                                                         // The alternative would be to wipe the state when claiming,
7747                                                                         // generating a `PaymentPathSuccessful` event but regenerating
7748                                                                         // it and the `PaymentSent` on every restart until the
7749                                                                         // `ChannelMonitor` is removed.
7750                                                                         pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger);
7751                                                                         pending_events_read = pending_events.into_inner().unwrap();
7752                                                                 }
7753                                                         },
7754                                                 }
7755                                         }
7756                                 }
7757                         }
7758                 }
7759
7760                 if !forward_htlcs.is_empty() || pending_outbounds.needs_abandon() {
7761                         // If we have pending HTLCs to forward, assume we either dropped a
7762                         // `PendingHTLCsForwardable` or the user received it but never processed it as they
7763                         // shut down before the timer hit. Either way, set the time_forwardable to a small
7764                         // constant as enough time has likely passed that we should simply handle the forwards
7765                         // now, or at least after the user gets a chance to reconnect to our peers.
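			// (On seeing this event a user would typically wait out `time_forwardable` and
			// then call `channel_manager.process_pending_htlc_forwards()` - a sketch of
			// typical usage, exact scheduling is up to the user's runtime.)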
7766                         pending_events_read.push(events::Event::PendingHTLCsForwardable {
7767                                 time_forwardable: Duration::from_secs(2),
7768                         });
7769                 }
7770
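		// Re-derive the key used to create and authenticate inbound payment data. This is
		// deterministic from the signer's key material and thus matches the pre-restart key.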
7771                 let inbound_pmt_key_material = args.node_signer.get_inbound_payment_key_material();
7772                 let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
7773
7774                 let mut claimable_htlcs = HashMap::with_capacity(claimable_htlcs_list.len());
7775                 if let Some(mut purposes) = claimable_htlc_purposes {
7776                         if purposes.len() != claimable_htlcs_list.len() {
7777                                 return Err(DecodeError::InvalidValue);
7778                         }
7779                         for (purpose, (payment_hash, previous_hops)) in purposes.drain(..).zip(claimable_htlcs_list.drain(..)) {
7780                                 claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
7781                         }
7782                 } else {
7783 			// LDK versions prior to 0.0.107 did not write a `pending_htlc_purposes`, but did
7784 			// include a `_legacy_hop_data` in the `OnionPayload`.
7785                         for (payment_hash, previous_hops) in claimable_htlcs_list.drain(..) {
7786                                 if previous_hops.is_empty() {
7787                                         return Err(DecodeError::InvalidValue);
7788                                 }
7789                                 let purpose = match &previous_hops[0].onion_payload {
7790                                         OnionPayload::Invoice { _legacy_hop_data } => {
7791                                                 if let Some(hop_data) = _legacy_hop_data {
7792                                                         events::PaymentPurpose::InvoicePayment {
7793                                                                 payment_preimage: match pending_inbound_payments.get(&payment_hash) {
7794                                                                         Some(inbound_payment) => inbound_payment.payment_preimage,
7795                                                                         None => match inbound_payment::verify(payment_hash, &hop_data, 0, &expanded_inbound_key, &args.logger) {
7796                                                                                 Ok((payment_preimage, _)) => payment_preimage,
7797                                                                                 Err(()) => {
7798                                                                                         log_error!(args.logger, "Failed to read claimable payment data for HTLC with payment hash {} - was not a pending inbound payment and didn't match our payment key", log_bytes!(payment_hash.0));
7799                                                                                         return Err(DecodeError::InvalidValue);
7800                                                                                 }
7801                                                                         }
7802                                                                 },
7803                                                                 payment_secret: hop_data.payment_secret,
7804                                                         }
7805                                                 } else { return Err(DecodeError::InvalidValue); }
7806                                         },
7807                                         OnionPayload::Spontaneous(payment_preimage) =>
7808                                                 events::PaymentPurpose::SpontaneousPayment(*payment_preimage),
7809                                 };
7810                                 claimable_htlcs.insert(payment_hash, (purpose, previous_hops));
7811                         }
7812                 }
7813
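		// Re-randomize the secp256k1 context for cheap side-channel resistance.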
7814                 let mut secp_ctx = Secp256k1::new();
7815                 secp_ctx.seeded_randomize(&args.entropy_source.get_secure_random_bytes());
7816
7817                 let our_network_pubkey = match args.node_signer.get_node_id(Recipient::Node) {
7818                         Ok(key) => key,
7819                         Err(()) => return Err(DecodeError::InvalidValue)
7820                 };
7821                 if let Some(network_pubkey) = received_network_pubkey {
7822                         if network_pubkey != our_network_pubkey {
7823 				log_error!(args.logger, "Node ID derived from our signer does not match the node ID this ChannelManager was serialized with.");
7824                                 return Err(DecodeError::InvalidValue);
7825                         }
7826                 }
7827
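		// Rebuild the set of outbound SCID aliases, generating fresh aliases for any channels
		// which were serialized without one (their stored alias reads as 0).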
7828                 let mut outbound_scid_aliases = HashSet::new();
7829                 for (_peer_node_id, peer_state_mutex) in per_peer_state.iter_mut() {
7830                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
7831                         let peer_state = &mut *peer_state_lock;
7832                         for (chan_id, chan) in peer_state.channel_by_id.iter_mut() {
7833                                 if chan.outbound_scid_alias() == 0 {
7834                                         let mut outbound_scid_alias;
7835                                         loop {
7836                                                 outbound_scid_alias = fake_scid::Namespace::OutboundAlias
7837                                                         .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.entropy_source);
7838                                                 if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
7839                                         }
7840                                         chan.set_outbound_scid_alias(outbound_scid_alias);
7841                                 } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) {
7842 					// Note that in rare cases it's possible to hit this while reading an older
7843                                         // channel if we just happened to pick a colliding outbound alias above.
7844                                         log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
7845                                         return Err(DecodeError::InvalidValue);
7846                                 }
7847                                 if chan.is_usable() {
7848                                         if short_to_chan_info.insert(chan.outbound_scid_alias(), (chan.get_counterparty_node_id(), *chan_id)).is_some() {
7849 						// Note that in rare cases it's possible to hit this while reading an older
7850                                                 // channel if we just happened to pick a colliding outbound alias above.
7851                                                 log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
7852                                                 return Err(DecodeError::InvalidValue);
7853                                         }
7854                                 }
7855                         }
7856                 }
7857
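		// Wrap the user-provided fee estimator so every feerate estimate we use is bounded
		// below by a sane floor.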
7858                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(args.fee_estimator);
7859
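		// Replay any payment preimages the ChannelMonitors learned of but which we may not
		// have fully processed into claims before shutting down.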
7860                 for (_, monitor) in args.channel_monitors.iter() {
7861                         for (payment_hash, payment_preimage) in monitor.get_stored_preimages() {
7862                                 if let Some((payment_purpose, claimable_htlcs)) = claimable_htlcs.remove(&payment_hash) {
7863                                         log_info!(args.logger, "Re-claiming HTLCs with payment hash {} as we've released the preimage to a ChannelMonitor!", log_bytes!(payment_hash.0));
7864                                         let mut claimable_amt_msat = 0;
7865                                         let mut receiver_node_id = Some(our_network_pubkey);
7866                                         let phantom_shared_secret = claimable_htlcs[0].prev_hop.phantom_shared_secret;
7867                                         if phantom_shared_secret.is_some() {
7868                                                 let phantom_pubkey = args.node_signer.get_node_id(Recipient::PhantomNode)
7869                                                         .expect("Failed to get node_id for phantom node recipient");
7870 						receiver_node_id = Some(phantom_pubkey);
7871                                         }
7872                                         for claimable_htlc in claimable_htlcs {
7873                                                 claimable_amt_msat += claimable_htlc.value;
7874
7875                                                 // Add a holding-cell claim of the payment to the Channel, which should be
7876                                                 // applied ~immediately on peer reconnection. Because it won't generate a
7877                                                 // new commitment transaction we can just provide the payment preimage to
7878                                                 // the corresponding ChannelMonitor and nothing else.
7879                                                 //
7880                                                 // We do so directly instead of via the normal ChannelMonitor update
7881                                                 // procedure as the ChainMonitor hasn't yet been initialized, implying
7882                                                 // we're not allowed to call it directly yet. Further, we do the update
7883                                                 // without incrementing the ChannelMonitor update ID as there isn't any
7884                                                 // reason to.
7885                                                 // If we were to generate a new ChannelMonitor update ID here and then
7886                                                 // crash before the user finishes block connect we'd end up force-closing
7887                                                 // this channel as well. On the flip side, there's no harm in restarting
7888                                                 // without the new monitor persisted - we'll end up right back here on
7889                                                 // restart.
7890                                                 let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
7891 						if let Some(peer_node_id) = id_to_peer.get(&previous_channel_id) {
7892                                                         let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
7893                                                         let mut peer_state_lock = peer_state_mutex.lock().unwrap();
7894                                                         let peer_state = &mut *peer_state_lock;
7895                                                         if let Some(channel) = peer_state.channel_by_id.get_mut(&previous_channel_id) {
7896                                                                 channel.claim_htlc_while_disconnected_dropping_mon_update(claimable_htlc.prev_hop.htlc_id, payment_preimage, &args.logger);
7897                                                         }
7898                                                 }
7899                                                 if let Some(previous_hop_monitor) = args.channel_monitors.get(&claimable_htlc.prev_hop.outpoint) {
7900                                                         previous_hop_monitor.provide_payment_preimage(&payment_hash, &payment_preimage, &args.tx_broadcaster, &bounded_fee_estimator, &args.logger);
7901                                                 }
7902                                         }
7903                                         pending_events_read.push(events::Event::PaymentClaimed {
7904                                                 receiver_node_id,
7905                                                 payment_hash,
7906                                                 purpose: payment_purpose,
7907                                                 amount_msat: claimable_amt_msat,
7908                                         });
7909                                 }
7910                         }
7911                 }
7912
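		// Restore, per peer, any actions which were blocked on a ChannelMonitorUpdate
		// completing at the time we were serialized.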
7913                 for (node_id, monitor_update_blocked_actions) in monitor_update_blocked_actions_per_peer.unwrap() {
7914                         if let Some(peer_state) = per_peer_state.get_mut(&node_id) {
7915                                 peer_state.lock().unwrap().monitor_update_blocked_actions = monitor_update_blocked_actions;
7916                         } else {
7917                                 log_error!(args.logger, "Got blocked actions without a per-peer-state for {}", node_id);
7918                                 return Err(DecodeError::InvalidValue);
7919                         }
7920                 }
7921
7922                 let channel_manager = ChannelManager {
7923                         genesis_hash,
7924                         fee_estimator: bounded_fee_estimator,
7925                         chain_monitor: args.chain_monitor,
7926                         tx_broadcaster: args.tx_broadcaster,
7927                         router: args.router,
7928
7929                         best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
7930
7931                         inbound_payment_key: expanded_inbound_key,
7932                         pending_inbound_payments: Mutex::new(pending_inbound_payments),
7933                         pending_outbound_payments: pending_outbounds,
7934                         pending_intercepted_htlcs: Mutex::new(pending_intercepted_htlcs.unwrap()),
7935
7936                         forward_htlcs: Mutex::new(forward_htlcs),
7937                         claimable_payments: Mutex::new(ClaimablePayments { claimable_htlcs, pending_claiming_payments: pending_claiming_payments.unwrap() }),
7938                         outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
7939                         id_to_peer: Mutex::new(id_to_peer),
7940                         short_to_chan_info: FairRwLock::new(short_to_chan_info),
7941                         fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
7942
7943                         probing_cookie_secret: probing_cookie_secret.unwrap(),
7944
7945                         our_network_pubkey,
7946                         secp_ctx,
7947
7948                         highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
7949
7950                         per_peer_state: FairRwLock::new(per_peer_state),
7951
7952                         pending_events: Mutex::new(pending_events_read),
7953                         pending_background_events: Mutex::new(pending_background_events),
7954                         total_consistency_lock: RwLock::new(()),
7955                         persistence_notifier: Notifier::new(),
7956
7957                         entropy_source: args.entropy_source,
7958                         node_signer: args.node_signer,
7959                         signer_provider: args.signer_provider,
7960
7961                         logger: args.logger,
7962                         default_configuration: args.default_config,
7963                 };
7964
7965                 for htlc_source in failed_htlcs.drain(..) {
7966                         let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
7967                         let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
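			// 0x4000 | 8 is PERM|8, i.e. BOLT 4's `permanent_channel_failure`.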
7968                         let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
7969                         channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
7970                 }
7971
7972                 //TODO: Broadcast channel update for closed channels, but only after we've made a
7973                 //connection or two.
7974
7975                 Ok((best_block_hash.clone(), channel_manager))
7976         }
7977 }
7978
7979 #[cfg(test)]
7980 mod tests {
7981         use bitcoin::hashes::Hash;
7982         use bitcoin::hashes::sha256::Hash as Sha256;
7983         use bitcoin::secp256k1::{PublicKey, Secp256k1, SecretKey};
7984         #[cfg(feature = "std")]
7985         use core::time::Duration;
7986         use core::sync::atomic::Ordering;
7987         use crate::events::{Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
7988         use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret};
7989         use crate::ln::channelmanager::{inbound_payment, PaymentId, PaymentSendFailure, RecipientOnionFields, InterceptId};
7990         use crate::ln::functional_test_utils::*;
7991         use crate::ln::msgs;
7992         use crate::ln::msgs::ChannelMessageHandler;
7993         use crate::routing::router::{PaymentParameters, RouteParameters, find_route};
7994         use crate::util::errors::APIError;
7995         use crate::util::test_utils;
7996         use crate::util::config::ChannelConfig;
7997         use crate::chain::keysinterface::EntropySource;
7998
7999         #[test]
8000         fn test_notify_limits() {
8001 		// Check that a few cases which don't require the persistence of a new ChannelManager
8002 		// indeed do not cause a new ChannelManager to be persisted.
8003                 let chanmon_cfgs = create_chanmon_cfgs(3);
8004                 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8005                 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8006                 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8007
8008                 // All nodes start with a persistable update pending as `create_network` connects each node
8009                 // with all other nodes to make most tests simpler.
8010                 assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
8011                 assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
8012                 assert!(nodes[2].node.get_persistable_update_future().poll_is_complete());
8013
8014                 let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1);
8015
8016 		// We check that the channel info the nodes hold doesn't change too early, even though
8017 		// we later deliver channel_update messages carrying new values
8018                 chan.0.contents.fee_base_msat *= 2;
8019                 chan.1.contents.fee_base_msat *= 2;
8020                 let node_a_chan_info = nodes[0].node.list_channels_with_counterparty(
8021                         &nodes[1].node.get_our_node_id()).pop().unwrap();
8022                 let node_b_chan_info = nodes[1].node.list_channels_with_counterparty(
8023                         &nodes[0].node.get_our_node_id()).pop().unwrap();
8024
8025                 // The first two nodes (which opened a channel) should now require fresh persistence
8026                 assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
8027                 assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
8028                 // ... but the last node should not.
8029                 assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
8030                 // After persisting the first two nodes they should no longer need fresh persistence.
8031                 assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
8032                 assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
8033
8034                 // Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
8035                 // about the channel.
8036                 nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
8037                 nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
8038                 assert!(!nodes[2].node.get_persistable_update_future().poll_is_complete());
8039
8040                 // The nodes which are a party to the channel should also ignore messages from unrelated
8041                 // parties.
8042                 nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
8043                 nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
8044                 nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
8045                 nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
8046                 assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
8047                 assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
8048
8049                 // At this point the channel info given by peers should still be the same.
8050                 assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
8051                 assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
8052
8053                 // An earlier version of handle_channel_update didn't check the directionality of the
8054                 // update message and would always update the local fee info, even if our peer was
8055                 // (spuriously) forwarding us our own channel_update.
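		// The low bit of a `channel_update`'s `flags` field is the direction bit - 0 if the
		// update was produced by the lexicographically-lesser "node one" (per BOLT 7).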
8056                 let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
8057                 let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
8058                 let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
8059
8060 		// First deliver each peer's own message, checking that the node doesn't need to be
8061                 // persisted and that its channel info remains the same.
8062                 nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
8063                 nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
8064                 assert!(!nodes[0].node.get_persistable_update_future().poll_is_complete());
8065                 assert!(!nodes[1].node.get_persistable_update_future().poll_is_complete());
8066                 assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
8067                 assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
8068
8069 		// Finally, deliver the other peer's message, ensuring each node needs to be persisted and
8070                 // the channel info has updated.
8071                 nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
8072                 nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
8073                 assert!(nodes[0].node.get_persistable_update_future().poll_is_complete());
8074                 assert!(nodes[1].node.get_persistable_update_future().poll_is_complete());
8075                 assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
8076                 assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
8077         }
8078
8079         #[test]
8080         fn test_keysend_dup_hash_partial_mpp() {
8081                 // Test that a keysend payment with a duplicate hash to an existing partial MPP payment fails as
8082                 // expected.
8083                 let chanmon_cfgs = create_chanmon_cfgs(2);
8084                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8085                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8086                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8087                 create_announced_chan_between_nodes(&nodes, 0, 1);
8088
8089                 // First, send a partial MPP payment.
8090                 let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
8091                 let mut mpp_route = route.clone();
8092                 mpp_route.paths.push(mpp_route.paths[0].clone());
8093
8094                 let payment_id = PaymentId([42; 32]);
8095 		// Use the utility function test_send_payment_along_path to send the payment with MPP data
8096 		// which indicates there are more HTLCs coming.
8097                 let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
8098                 let session_privs = nodes[0].node.test_add_new_pending_payment(our_payment_hash,
8099                         RecipientOnionFields::secret_only(payment_secret), payment_id, &mpp_route).unwrap();
8100                 nodes[0].node.test_send_payment_along_path(&mpp_route.paths[0], &our_payment_hash,
8101                         RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[0]).unwrap();
8102                 check_added_monitors!(nodes[0], 1);
8103                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8104                 assert_eq!(events.len(), 1);
8105                 pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
8106
8107                 // Next, send a keysend payment with the same payment_hash and make sure it fails.
8108                 nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
8109                         RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
8110                 check_added_monitors!(nodes[0], 1);
8111                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8112                 assert_eq!(events.len(), 1);
8113                 let ev = events.drain(..).next().unwrap();
8114                 let payment_event = SendEvent::from_event(ev);
8115                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8116                 check_added_monitors!(nodes[1], 0);
8117                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8118                 expect_pending_htlcs_forwardable!(nodes[1]);
8119                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash: our_payment_hash }]);
8120                 check_added_monitors!(nodes[1], 1);
8121                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8122                 assert!(updates.update_add_htlcs.is_empty());
8123                 assert!(updates.update_fulfill_htlcs.is_empty());
8124                 assert_eq!(updates.update_fail_htlcs.len(), 1);
8125                 assert!(updates.update_fail_malformed_htlcs.is_empty());
8126                 assert!(updates.update_fee.is_none());
8127                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
8128                 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
8129                 expect_payment_failed!(nodes[0], our_payment_hash, true);
8130
8131                 // Send the second half of the original MPP payment.
8132                 nodes[0].node.test_send_payment_along_path(&mpp_route.paths[1], &our_payment_hash,
8133                         RecipientOnionFields::secret_only(payment_secret), 200_000, cur_height, payment_id, &None, session_privs[1]).unwrap();
8134                 check_added_monitors!(nodes[0], 1);
8135                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8136                 assert_eq!(events.len(), 1);
8137                 pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
8138
8139                 // Claim the full MPP payment. Note that we can't use a test utility like
8140                 // claim_funds_along_route because the ordering of the messages causes the second half of the
8141                 // payment to be put in the holding cell, which confuses the test utilities. So we exchange the
8142                 // lightning messages manually.
8143                 nodes[1].node.claim_funds(payment_preimage);
8144                 expect_payment_claimed!(nodes[1], our_payment_hash, 200_000);
8145                 check_added_monitors!(nodes[1], 2);
8146
8147                 let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8148                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
8149                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
8150                 check_added_monitors!(nodes[0], 1);
8151                 let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
8152                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
8153                 check_added_monitors!(nodes[1], 1);
8154                 let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8155                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_cs);
8156                 check_added_monitors!(nodes[1], 1);
8157                 let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
8158                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
8159                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
8160                 check_added_monitors!(nodes[0], 1);
8161                 let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
8162                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
8163                 let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
8164                 check_added_monitors!(nodes[0], 1);
8165                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
8166                 check_added_monitors!(nodes[1], 1);
8167                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
8168                 check_added_monitors!(nodes[1], 1);
8169                 let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
8170                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
8171                 check_added_monitors!(nodes[0], 1);
8172
8173                 // Note that successful MPP payments will generate a single PaymentSent event upon the first
8174                 // path's success and a PaymentPathSuccessful event for each path's success.
8175                 let events = nodes[0].node.get_and_clear_pending_events();
8176                 assert_eq!(events.len(), 3);
8177                 match events[0] {
8178                         Event::PaymentSent { payment_id: ref id, payment_preimage: ref preimage, payment_hash: ref hash, .. } => {
8179                                 assert_eq!(Some(payment_id), *id);
8180                                 assert_eq!(payment_preimage, *preimage);
8181                                 assert_eq!(our_payment_hash, *hash);
8182                         },
8183                         _ => panic!("Unexpected event"),
8184                 }
8185                 match events[1] {
8186                         Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
8187                                 assert_eq!(payment_id, *actual_payment_id);
8188                                 assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
8189                                 assert_eq!(route.paths[0], *path);
8190                         },
8191                         _ => panic!("Unexpected event"),
8192                 }
8193                 match events[2] {
8194                         Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
8195                                 assert_eq!(payment_id, *actual_payment_id);
8196                                 assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
8197                                 assert_eq!(route.paths[0], *path);
8198                         },
8199                         _ => panic!("Unexpected event"),
8200                 }
8201         }
8202
8203         #[test]
8204         fn test_keysend_dup_payment_hash() {
8205                 // (1): Test that a keysend payment with a duplicate payment hash to an existing pending
8206                 //      outbound regular payment fails as expected.
8207                 // (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
8208                 //      fails as expected.
8209                 let chanmon_cfgs = create_chanmon_cfgs(2);
8210                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8211                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8212                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8213                 create_announced_chan_between_nodes(&nodes, 0, 1);
8214                 let scorer = test_utils::TestScorer::new();
8215                 let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
8216
8217                 // To start (1), send a regular payment but don't claim it.
8218                 let expected_route = [&nodes[1]];
8219                 let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
8220
8221                 // Next, attempt a keysend payment and make sure it fails.
8222                 let route_params = RouteParameters {
8223                         payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id(), TEST_FINAL_CLTV),
8224                         final_value_msat: 100_000,
8225                 };
8226                 let route = find_route(
8227                         &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
8228                         None, nodes[0].logger, &scorer, &random_seed_bytes
8229                 ).unwrap();
8230                 nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
8231                         RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
8232                 check_added_monitors!(nodes[0], 1);
8233                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8234                 assert_eq!(events.len(), 1);
8235                 let ev = events.drain(..).next().unwrap();
8236                 let payment_event = SendEvent::from_event(ev);
8237                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8238                 check_added_monitors!(nodes[1], 0);
8239                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8240 		// We have to process pending HTLC forwards twice - the first attempt forwards the payment
8241 		// onward (and fails), the second processes the resulting failure and fails the HTLC backward
8242                 expect_pending_htlcs_forwardable!(nodes[1]);
8243                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
8244                 check_added_monitors!(nodes[1], 1);
8245                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8246                 assert!(updates.update_add_htlcs.is_empty());
8247                 assert!(updates.update_fulfill_htlcs.is_empty());
8248                 assert_eq!(updates.update_fail_htlcs.len(), 1);
8249                 assert!(updates.update_fail_malformed_htlcs.is_empty());
8250                 assert!(updates.update_fee.is_none());
8251                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
8252                 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
8253                 expect_payment_failed!(nodes[0], payment_hash, true);
8254
8255                 // Finally, claim the original payment.
8256                 claim_payment(&nodes[0], &expected_route, payment_preimage);
8257
8258                 // To start (2), send a keysend payment but don't claim it.
8259                 let payment_preimage = PaymentPreimage([42; 32]);
8260                 let route = find_route(
8261                         &nodes[0].node.get_our_node_id(), &route_params, &nodes[0].network_graph,
8262                         None, nodes[0].logger, &scorer, &random_seed_bytes
8263                 ).unwrap();
8264                 let payment_hash = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage),
8265                         RecipientOnionFields::spontaneous_empty(), PaymentId(payment_preimage.0)).unwrap();
8266                 check_added_monitors!(nodes[0], 1);
8267                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8268                 assert_eq!(events.len(), 1);
8269                 let event = events.pop().unwrap();
8270                 let path = vec![&nodes[1]];
8271                 pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
8272
8273                 // Next, attempt a regular payment and make sure it fails.
8274                 let payment_secret = PaymentSecret([43; 32]);
8275                 nodes[0].node.send_payment_with_route(&route, payment_hash,
8276                         RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap();
8277                 check_added_monitors!(nodes[0], 1);
8278                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
8279                 assert_eq!(events.len(), 1);
8280                 let ev = events.drain(..).next().unwrap();
8281                 let payment_event = SendEvent::from_event(ev);
8282                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
8283                 check_added_monitors!(nodes[1], 0);
8284                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
8285                 expect_pending_htlcs_forwardable!(nodes[1]);
8286                 expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::FailedPayment { payment_hash }]);
8287                 check_added_monitors!(nodes[1], 1);
8288                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
8289                 assert!(updates.update_add_htlcs.is_empty());
8290                 assert!(updates.update_fulfill_htlcs.is_empty());
8291                 assert_eq!(updates.update_fail_htlcs.len(), 1);
8292                 assert!(updates.update_fail_malformed_htlcs.is_empty());
8293                 assert!(updates.update_fee.is_none());
8294                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
8295                 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
8296                 expect_payment_failed!(nodes[0], payment_hash, true);
8297
8298                 // Finally, succeed the keysend payment.
8299                 claim_payment(&nodes[0], &expected_route, payment_preimage);
8300         }
8301
8302         #[test]
8303         fn test_keysend_hash_mismatch() {
8304                 // Test that if we receive a keysend `update_add_htlc` msg, we fail as expected if the keysend
8305                 // preimage doesn't match the msg's payment hash.
8306                 let chanmon_cfgs = create_chanmon_cfgs(2);
8307                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8308                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8309                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8310
8311                 let payer_pubkey = nodes[0].node.get_our_node_id();
8312                 let payee_pubkey = nodes[1].node.get_our_node_id();
8313
8314                 let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
8315                 let route_params = RouteParameters {
8316                         payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
8317                         final_value_msat: 10_000,
8318                 };
8319                 let network_graph = nodes[0].network_graph.clone();
8320                 let first_hops = nodes[0].node.list_usable_channels();
8321                 let scorer = test_utils::TestScorer::new();
8322                 let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
8323                 let route = find_route(
8324                         &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
8325                         nodes[0].logger, &scorer, &random_seed_bytes
8326                 ).unwrap();
8327
8328                 let test_preimage = PaymentPreimage([42; 32]);
8329                 let mismatch_payment_hash = PaymentHash([43; 32]);
8330                 let session_privs = nodes[0].node.test_add_new_pending_payment(mismatch_payment_hash,
8331                         RecipientOnionFields::spontaneous_empty(), PaymentId(mismatch_payment_hash.0), &route).unwrap();
8332                 nodes[0].node.test_send_payment_internal(&route, mismatch_payment_hash,
8333                         RecipientOnionFields::spontaneous_empty(), Some(test_preimage), PaymentId(mismatch_payment_hash.0), None, session_privs).unwrap();
8334                 check_added_monitors!(nodes[0], 1);
8335
8336                 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
8337                 assert_eq!(updates.update_add_htlcs.len(), 1);
8338                 assert!(updates.update_fulfill_htlcs.is_empty());
8339                 assert!(updates.update_fail_htlcs.is_empty());
8340                 assert!(updates.update_fail_malformed_htlcs.is_empty());
8341                 assert!(updates.update_fee.is_none());
8342                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
8343
8344                 nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Payment preimage didn't match payment hash", 1);
8345         }
8346
8347         #[test]
8348         fn test_keysend_msg_with_secret_err() {
8349                 // Test that we error as expected if we receive a keysend payment that includes a payment secret.
8350                 let chanmon_cfgs = create_chanmon_cfgs(2);
8351                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8352                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8353                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8354
8355                 let payer_pubkey = nodes[0].node.get_our_node_id();
8356                 let payee_pubkey = nodes[1].node.get_our_node_id();
8357
8358                 let _chan = create_chan_between_nodes(&nodes[0], &nodes[1]);
8359                 let route_params = RouteParameters {
8360                         payment_params: PaymentParameters::for_keysend(payee_pubkey, 40),
8361                         final_value_msat: 10_000,
8362                 };
8363                 let network_graph = nodes[0].network_graph.clone();
8364                 let first_hops = nodes[0].node.list_usable_channels();
8365                 let scorer = test_utils::TestScorer::new();
8366                 let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
8367                 let route = find_route(
8368                         &payer_pubkey, &route_params, &network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
8369                         nodes[0].logger, &scorer, &random_seed_bytes
8370                 ).unwrap();
8371
8372                 let test_preimage = PaymentPreimage([42; 32]);
8373                 let test_secret = PaymentSecret([43; 32]);
8374                 let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
8375                 let session_privs = nodes[0].node.test_add_new_pending_payment(payment_hash,
8376                         RecipientOnionFields::secret_only(test_secret), PaymentId(payment_hash.0), &route).unwrap();
8377                 nodes[0].node.test_send_payment_internal(&route, payment_hash,
8378                         RecipientOnionFields::secret_only(test_secret), Some(test_preimage),
8379                         PaymentId(payment_hash.0), None, session_privs).unwrap();
8380                 check_added_monitors!(nodes[0], 1);
8381
8382                 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
8383                 assert_eq!(updates.update_add_htlcs.len(), 1);
8384                 assert!(updates.update_fulfill_htlcs.is_empty());
8385                 assert!(updates.update_fail_htlcs.is_empty());
8386                 assert!(updates.update_fail_malformed_htlcs.is_empty());
8387                 assert!(updates.update_fee.is_none());
8388                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
8389
8390                 nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "We don't support MPP keysend payments", 1);
8391         }
8392
8393         #[test]
8394         fn test_multi_hop_missing_secret() {
8395                 let chanmon_cfgs = create_chanmon_cfgs(4);
8396                 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
8397                 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
8398                 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
8399
8400                 let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id;
8401                 let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id;
8402                 let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id;
8403                 let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id;
8404
8405                 // Marshall an MPP route.
8406 		let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100_000);
8407                 let path = route.paths[0].clone();
8408                 route.paths.push(path);
8409                 route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
8410                 route.paths[0][0].short_channel_id = chan_1_id;
8411                 route.paths[0][1].short_channel_id = chan_3_id;
8412                 route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
8413                 route.paths[1][0].short_channel_id = chan_2_id;
8414                 route.paths[1][1].short_channel_id = chan_4_id;
8415
8416                 match nodes[0].node.send_payment_with_route(&route, payment_hash,
8417                         RecipientOnionFields::spontaneous_empty(), PaymentId(payment_hash.0))
8418                 .unwrap_err() {
8419                         PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => {
8420                                 assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err))
8421                         },
8422                         _ => panic!("unexpected error")
8423                 }
8424         }
8425
8426         #[test]
8427         fn test_drop_disconnected_peers_when_removing_channels() {
8428                 let chanmon_cfgs = create_chanmon_cfgs(2);
8429                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8430                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8431                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8432
8433                 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
8434
8435                 nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id());
8436                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
8437
8438                 nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
8439                 check_closed_broadcast!(nodes[0], true);
8440                 check_added_monitors!(nodes[0], 1);
8441                 check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
8442
8443                 {
8444 			// Assert that the peer-state entry for nodes[1] in nodes[0] is awaiting removal once
8445 			// nodes[1] has been disconnected and the channel between them has been force-closed.
8446                         let nodes_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
8447                         // Assert that nodes[1] isn't removed before `timer_tick_occurred` has been executed.
8448                         assert_eq!(nodes_0_per_peer_state.len(), 1);
8449                         assert!(nodes_0_per_peer_state.get(&nodes[1].node.get_our_node_id()).is_some());
8450                 }
8451
8452                 nodes[0].node.timer_tick_occurred();
8453
8454                 {
8455                         // Assert that nodes[1] has now been removed.
8456                         assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
8457                 }
8458         }
8459
8460         #[test]
8461         fn bad_inbound_payment_hash() {
8462                 // Add coverage for checking that a user-provided payment hash matches the payment secret.
8463                 let chanmon_cfgs = create_chanmon_cfgs(2);
8464                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8465                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8466                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8467
8468                 let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]);
8469                 let payment_data = msgs::FinalOnionHopData {
8470                         payment_secret,
8471                         total_msat: 100_000,
8472                 };
8473
8474                 // Ensure that if the payment hash given to `inbound_payment::verify` differs from the original,
8475                 // payment verification fails as expected.
8476                 let mut bad_payment_hash = payment_hash.clone();
8477                 bad_payment_hash.0[0] += 1;
8478                 match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
8479                         Ok(_) => panic!("Unexpected ok"),
8480                         Err(()) => {
8481                                 nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment", "Failing HTLC with user-generated payment_hash", 1);
8482                         }
8483                 }
8484
8485                 // Check that using the original payment hash succeeds.
8486                 assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
8487         }
8488
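        // A hedged companion sketch, not part of the original suite: corrupting the payment
        // *secret* rather than the payment hash should likewise fail verification, as the
        // secret carries the encrypted metadata the hash is validated against. We only
        // assert on the error here, not on any particular log line.
        #[test]
        fn bad_inbound_payment_secret() {
                let chanmon_cfgs = create_chanmon_cfgs(2);
                let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

                let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]);
                let mut bad_payment_secret = payment_secret.clone();
                bad_payment_secret.0[0] ^= 1;
                let payment_data = msgs::FinalOnionHopData {
                        payment_secret: bad_payment_secret,
                        total_msat: 100_000,
                };

                assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_err());
        }
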
8489         #[test]
8490         fn test_id_to_peer_coverage() {
8491                 // Test that the `ChannelManager::id_to_peer` map contains channels which have been assigned
8492                 // a real `channel_id` (i.e. have had the funding tx created), and that they are removed once
8493                 // the channel is successfully closed.
8494                 let chanmon_cfgs = create_chanmon_cfgs(2);
8495                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8496                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8497                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8498
8499                 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 500_000_000, 42, None).unwrap();
8500                 let open_channel = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8501                 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel);
8502                 let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8503                 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
8504
8505                 let (temporary_channel_id, tx, _funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42);
8506                 let channel_id = &tx.txid().into_inner();
8507                 {
8508                         // Ensure that the `id_to_peer` map is empty until either party has received the
8509                         // funding transaction and thus has the real `channel_id`.
8510                         assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
8511                         assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
8512                 }
8513
8514                 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap();
8515                 {
8516                         // Assert that `nodes[0]`'s `id_to_peer` map is populated with the channel as soon as
8517                         // it has the funding transaction.
8518                         let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
8519                         assert_eq!(nodes_0_lock.len(), 1);
8520                         assert!(nodes_0_lock.contains_key(channel_id));
8521                 }
8522
8523                 assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
8524
8525                 let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8526
8527                 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
8528                 {
8529                         let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
8530                         assert_eq!(nodes_0_lock.len(), 1);
8531                         assert!(nodes_0_lock.contains_key(channel_id));
8532                 }
8533                 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
8534
8535                 {
8536                         // Assert that `nodes[1]`'s `id_to_peer` map is populated with the channel as soon as
8537                         // it has the funding transaction.
8538                         let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
8539                         assert_eq!(nodes_1_lock.len(), 1);
8540                         assert!(nodes_1_lock.contains_key(channel_id));
8541                 }
8542                 check_added_monitors!(nodes[1], 1);
8543                 let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
8544                 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
8545                 check_added_monitors!(nodes[0], 1);
8546                 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
8547                 let (channel_ready, _) = create_chan_between_nodes_with_value_confirm(&nodes[0], &nodes[1], &tx);
8548                 let (announcement, nodes_0_update, nodes_1_update) = create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready);
8549                 update_nodes_with_chan_announce(&nodes, 0, 1, &announcement, &nodes_0_update, &nodes_1_update);
8550
8551                 nodes[0].node.close_channel(channel_id, &nodes[1].node.get_our_node_id()).unwrap();
8552                 nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
8553                 let nodes_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
8554                 nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &nodes_1_shutdown);
8555
8556                 let closing_signed_node_0 = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
8557                 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0);
8558                 {
8559                         // Assert that the channel is kept in the `id_to_peer` map for both nodes until the
8560                         // channel can be fully closed by both parties (i.e. no outstanding HTLCs exist, the
8561                         // fee for the closing transaction has been negotiated, and each party has the
8562                         // other's signature for the negotiated closing transaction).
8563                         let nodes_0_lock = nodes[0].node.id_to_peer.lock().unwrap();
8564                         assert_eq!(nodes_0_lock.len(), 1);
8565                         assert!(nodes_0_lock.contains_key(channel_id));
8566                 }
8567
8568                 {
8569                         // At this stage, `nodes[1]` has proposed a fee for the closing transaction in the
8570                         // `handle_closing_signed` call above. As `nodes[1]` has not yet received the signature
8571                         // from `nodes[0]` for the closing transaction with the proposed fee, the channel is
8572                         // kept in the `nodes[1]`'s `id_to_peer` map.
8573                         let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
8574                         assert_eq!(nodes_1_lock.len(), 1);
8575                         assert!(nodes_1_lock.contains_key(channel_id));
8576                 }
8577
8578                 nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
8579                 {
8580                         // `nodes[0]` accepts `nodes[1]`'s proposed fee for the closing transaction, and
8581                         // therefore has all it needs to fully close the channel (both signatures for the
8582                         // closing transaction).
8583                         // Assert that the channel is removed from `nodes[0]`'s `id_to_peer` map as it can be
8584                         // fully closed by `nodes[0]`.
8585                         assert_eq!(nodes[0].node.id_to_peer.lock().unwrap().len(), 0);
8586
8587                         // Assert that the channel is still in `nodes[1]`'s `id_to_peer` map, as `nodes[1]`
8588                         // doesn't have `nodes[0]`'s signature for the closing transaction yet.
8589                         let nodes_1_lock = nodes[1].node.id_to_peer.lock().unwrap();
8590                         assert_eq!(nodes_1_lock.len(), 1);
8591                         assert!(nodes_1_lock.contains_key(channel_id));
8592                 }
8593
8594                 let (_nodes_0_update, closing_signed_node_0) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
8595
8596                 nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &closing_signed_node_0.unwrap());
8597                 {
8598                         // Assert that the channel has now been removed from both parties' `id_to_peer` maps once
8599                         // they both have everything required to fully close the channel.
8600                         assert_eq!(nodes[1].node.id_to_peer.lock().unwrap().len(), 0);
8601                 }
8602                 let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
8603
8604                 check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
8605                 check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
8606         }
8607
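        // A hedged companion sketch, not part of the original suite: users cannot inspect the
        // internal `id_to_peer` map directly, but `list_channels` exposes the same
        // channel-id-to-counterparty association once a channel has been assigned a real
        // `channel_id`.
        #[test]
        fn test_list_channels_reports_counterparty() {
                let chanmon_cfgs = create_chanmon_cfgs(2);
                let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

                let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

                let channels = nodes[0].node.list_channels();
                assert_eq!(channels.len(), 1);
                assert_eq!(channels[0].channel_id, chan.2);
                assert_eq!(channels[0].counterparty.node_id, nodes[1].node.get_our_node_id());
        }
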
8608         fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
8609                 let expected_message = format!("Not connected to node: {}", expected_public_key);
8610                 check_api_error_message(expected_message, res_err)
8611         }
8612
8613         fn check_unknown_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {
8614                 let expected_message = format!("Can't find a peer matching the passed counterparty node_id {}", expected_public_key);
8615                 check_api_error_message(expected_message, res_err)
8616         }
8617
8618         fn check_api_error_message<T>(expected_err_message: String, res_err: Result<T, APIError>) {
8619                 match res_err {
8620                         Err(APIError::APIMisuseError { err }) => {
8621                                 assert_eq!(err, expected_err_message);
8622                         },
8623                         Err(APIError::ChannelUnavailable { err }) => {
8624                                 assert_eq!(err, expected_err_message);
8625                         },
8626                         Ok(_) => panic!("Unexpected Ok"),
8627                         Err(_) => panic!("Unexpected Error"),
8628                 }
8629         }
8630
8631         #[test]
8632         fn test_api_calls_with_unknown_counterparty_node() {
8633                 // Tests that our API functions which expect a `counterparty_node_id` as input behave as
8634                 // expected when the `counterparty_node_id` is an unknown peer in the
8635                 // `ChannelManager::per_peer_state` map.
8636                 let chanmon_cfg = create_chanmon_cfgs(2);
8637                 let node_cfg = create_node_cfgs(2, &chanmon_cfg);
8638                 let node_chanmgr = create_node_chanmgrs(2, &node_cfg, &[None, None]);
8639                 let nodes = create_network(2, &node_cfg, &node_chanmgr);
8640
8641                 // Dummy values
8642                 let channel_id = [4; 32];
8643                 let unknown_public_key = PublicKey::from_secret_key(&Secp256k1::signing_only(), &SecretKey::from_slice(&[42; 32]).unwrap());
8644                 let intercept_id = InterceptId([0; 32]);
8645
8646                 // Test the API functions.
8647                 check_not_connected_to_peer_error(nodes[0].node.create_channel(unknown_public_key, 1_000_000, 500_000_000, 42, None), unknown_public_key);
8648
8649                 check_unknown_peer_error(nodes[0].node.accept_inbound_channel(&channel_id, &unknown_public_key, 42), unknown_public_key);
8650
8651                 check_unknown_peer_error(nodes[0].node.close_channel(&channel_id, &unknown_public_key), unknown_public_key);
8652
8653                 check_unknown_peer_error(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &unknown_public_key), unknown_public_key);
8654
8655                 check_unknown_peer_error(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &unknown_public_key), unknown_public_key);
8656
8657                 check_unknown_peer_error(nodes[0].node.forward_intercepted_htlc(intercept_id, &channel_id, unknown_public_key, 1_000_000), unknown_public_key);
8658
8659                 check_unknown_peer_error(nodes[0].node.update_channel_config(&unknown_public_key, &[channel_id], &ChannelConfig::default()), unknown_public_key);
8660         }
8661
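        // A hedged companion sketch, not part of the original suite: with a *known*, connected
        // peer but a bogus `channel_id`, the same API calls should still fail, this time
        // because the channel rather than the peer cannot be found. We assert only that an
        // error is returned, not on its exact message.
        #[test]
        fn test_api_calls_with_unknown_channel_id() {
                let chanmon_cfgs = create_chanmon_cfgs(2);
                let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
                let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
                let nodes = create_network(2, &node_cfgs, &node_chanmgrs);

                // A dummy channel id which no channel with nodes[1] will ever have.
                let channel_id = [4; 32];

                assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_err());
                assert!(nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id()).is_err());
                assert!(nodes[0].node.force_close_without_broadcasting_txn(&channel_id, &nodes[1].node.get_our_node_id()).is_err());
        }
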
8662         #[test]
8663         fn test_connection_limiting() {
8664                 // Test that we limit un-channel'd peers and un-funded channels properly.
8665                 let chanmon_cfgs = create_chanmon_cfgs(2);
8666                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8667                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8668                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8669
8670                 // Note that create_network connects the nodes together for us
8671
8672                 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
8673                 let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8674
8675                 let mut funding_tx = None;
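                // Open MAX_UNFUNDED_CHANS_PER_PEER inbound channels from nodes[0], completing the
                // funding flow only for the first so we end up with exactly one channel that can
                // later be confirmed on-chain.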
8676                 for idx in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
8677                         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
8678                         let accept_channel = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8679
8680                         if idx == 0 {
8681                                 nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_channel);
8682                                 let (temporary_channel_id, tx, _) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100_000, 42);
8683                                 funding_tx = Some(tx.clone());
8684                                 nodes[0].node.funding_transaction_generated(&temporary_channel_id, &nodes[1].node.get_our_node_id(), tx).unwrap();
8685                                 let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
8686
8687                                 nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg);
8688                                 check_added_monitors!(nodes[1], 1);
8689                                 expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
8690
8691                                 let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
8692
8693                                 nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed);
8694                                 check_added_monitors!(nodes[0], 1);
8695                                 expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id());
8696                         }
8697                         open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
8698                 }
8699
8700                 // A (MAX_UNFUNDED_CHANS_PER_PEER + 1)th channel will be summarily rejected
8701                 open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
8702                 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
8703                 assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
8704                         open_channel_msg.temporary_channel_id);
8705
8706                 // Further, because all of our channels with nodes[0] are inbound, and none of them are
8707                 // funded, nodes[0] doesn't count as a "protected" peer, i.e. it counts towards the
8708                 // MAX_NO_CHANNEL_PEERS limit.
8709                 let mut peer_pks = Vec::with_capacity(super::MAX_NO_CHANNEL_PEERS);
8710                 for _ in 1..super::MAX_NO_CHANNEL_PEERS {
8711                         let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
8712                                 &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
8713                         peer_pks.push(random_pk);
8714                         nodes[1].node.peer_connected(&random_pk, &msgs::Init {
8715                                 features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
8716                 }
8717                 let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
8718                         &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
8719                 nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
8720                         features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
8721
8722                 // Also importantly, because nodes[0] isn't "protected", we will refuse a reconnection from
8723                 // them if we have too many un-channel'd peers.
8724                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
8725                 let chan_closed_events = nodes[1].node.get_and_clear_pending_events();
8726                 assert_eq!(chan_closed_events.len(), super::MAX_UNFUNDED_CHANS_PER_PEER - 1);
8727                 for ev in chan_closed_events {
8728                         if let Event::ChannelClosed { .. } = ev { } else { panic!(); }
8729                 }
8730                 nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
8731                         features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
8732                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
8733                         features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap_err();
8734
8735                 // but of course if the connection is outbound it's allowed...
8736                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
8737                         features: nodes[0].node.init_features(), remote_network_address: None }, false).unwrap();
8738                 nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id());
8739
8740                 // Now nodes[0] is disconnected but still has a pending, un-funded channel lying around.
8741                 // Even though we accept one more connection from new peers, we won't actually let them
8742                 // open channels.
8743                 assert!(peer_pks.len() > super::MAX_UNFUNDED_CHANNEL_PEERS - 1);
8744                 for i in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
8745                         nodes[1].node.handle_open_channel(&peer_pks[i], &open_channel_msg);
8746                         get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, peer_pks[i]);
8747                         open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
8748                 }
8749                 nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
8750                 assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
8751                         open_channel_msg.temporary_channel_id);
8752
8753                 // Of course, however, outbound channels are always allowed
8754                 nodes[1].node.create_channel(last_random_pk, 100_000, 0, 42, None).unwrap();
8755                 get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, last_random_pk);
8756
8757                 // If we fund the first channel, nodes[0] has a live on-chain channel with us; it is now
8758                 // "protected" and can connect again.
8759                 mine_transaction(&nodes[1], funding_tx.as_ref().unwrap());
8760                 nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init {
8761                         features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
8762                 get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id());
8763
8764                 // Further, because the first channel was funded, we can open another channel with
8765                 // last_random_pk.
8766                 nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
8767                 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
8768         }
8769
8770         #[test]
8771         fn test_outbound_chans_unlimited() {
8772                 // Test that we never refuse an outbound channel even if a peer is unfunded-channel-limited
8773                 let chanmon_cfgs = create_chanmon_cfgs(2);
8774                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8775                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
8776                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8777
8778                 // Note that create_network connects the nodes together for us
8779
8780                 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
8781                 let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8782
8783                 for _ in 0..super::MAX_UNFUNDED_CHANS_PER_PEER {
8784                         nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
8785                         get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id());
8786                         open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
8787                 }
8788
8789                 // Once we have MAX_UNFUNDED_CHANS_PER_PEER unfunded channels, new inbound channels will be
8790                 // rejected.
8791                 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
8792                 assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
8793                         open_channel_msg.temporary_channel_id);
8794
8795                 // but we can still open an outbound channel.
8796                 nodes[1].node.create_channel(nodes[0].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
8797                 get_event_msg!(nodes[1], MessageSendEvent::SendOpenChannel, nodes[0].node.get_our_node_id());
8798
8799                 // but even with such an outbound channel, additional inbound channels will still fail.
8800                 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
8801                 assert_eq!(get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id()).channel_id,
8802                         open_channel_msg.temporary_channel_id);
8803         }
8804
8805         #[test]
8806         fn test_0conf_limiting() {
8807                 // Tests that we properly limit inbound channels when we have the manual-channel-acceptance
8808                 // flag set and (sometimes) accept channels as 0conf.
8809                 let chanmon_cfgs = create_chanmon_cfgs(2);
8810                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8811                 let mut settings = test_default_channel_config();
8812                 settings.manually_accept_inbound_channels = true;
8813                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, Some(settings)]);
8814                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8815
8816                 // Note that create_network connects the nodes together for us
8817
8818                 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None).unwrap();
8819                 let mut open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8820
8821                 // First, get us up to MAX_UNFUNDED_CHANNEL_PEERS so we can test at the edge
8822                 for _ in 0..super::MAX_UNFUNDED_CHANNEL_PEERS - 1 {
8823                         let random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
8824                                 &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
8825                         nodes[1].node.peer_connected(&random_pk, &msgs::Init {
8826                                 features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
8827
8828                         nodes[1].node.handle_open_channel(&random_pk, &open_channel_msg);
8829                         let events = nodes[1].node.get_and_clear_pending_events();
8830                         match events[0] {
8831                                 Event::OpenChannelRequest { temporary_channel_id, .. } => {
8832                                         nodes[1].node.accept_inbound_channel(&temporary_channel_id, &random_pk, 23).unwrap();
8833                                 }
8834                                 _ => panic!("Unexpected event"),
8835                         }
8836                         get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, random_pk);
8837                         open_channel_msg.temporary_channel_id = nodes[0].keys_manager.get_secure_random_bytes();
8838                 }
8839
8840                 // If we try to accept a channel from another peer non-0conf it will fail.
8841                 let last_random_pk = PublicKey::from_secret_key(&nodes[0].node.secp_ctx,
8842                         &SecretKey::from_slice(&nodes[1].keys_manager.get_secure_random_bytes()).unwrap());
8843                 nodes[1].node.peer_connected(&last_random_pk, &msgs::Init {
8844                         features: nodes[0].node.init_features(), remote_network_address: None }, true).unwrap();
8845                 nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
8846                 let events = nodes[1].node.get_and_clear_pending_events();
8847                 match events[0] {
8848                         Event::OpenChannelRequest { temporary_channel_id, .. } => {
8849                                 match nodes[1].node.accept_inbound_channel(&temporary_channel_id, &last_random_pk, 23) {
8850                                         Err(APIError::APIMisuseError { err }) =>
8851                                                 assert_eq!(err, "Too many peers with unfunded channels, refusing to accept new ones"),
8852                                         _ => panic!(),
8853                                 }
8854                         }
8855                         _ => panic!("Unexpected event"),
8856                 }
8857                 assert_eq!(get_err_msg(&nodes[1], &last_random_pk).channel_id,
8858                         open_channel_msg.temporary_channel_id);
8859
8860                 // ...however if we accept the same channel 0conf it should work just fine.
8861                 nodes[1].node.handle_open_channel(&last_random_pk, &open_channel_msg);
8862                 let events = nodes[1].node.get_and_clear_pending_events();
8863                 match events[0] {
8864                         Event::OpenChannelRequest { temporary_channel_id, .. } => {
8865                                 nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &last_random_pk, 23).unwrap();
8866                         }
8867                         _ => panic!("Unexpected event"),
8868                 }
8869                 get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, last_random_pk);
8870         }
8871
8872         #[cfg(anchors)]
8873         #[test]
8874         fn test_anchors_zero_fee_htlc_tx_fallback() {
8875                 // Tests that if both nodes support anchors, but the remote node does not want to accept
8876                 // anchor channels at the moment, an error is sent to the local node so that it can retry
8877                 // opening the channel without the anchors feature.
8878                 let chanmon_cfgs = create_chanmon_cfgs(2);
8879                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
8880                 let mut anchors_config = test_default_channel_config();
8881                 anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
8882                 anchors_config.manually_accept_inbound_channels = true;
8883                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config.clone())]);
8884                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
8885
8886                 nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 0, None).unwrap();
8887                 let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8888                 assert!(open_channel_msg.channel_type.as_ref().unwrap().supports_anchors_zero_fee_htlc_tx());
8889
8890                 nodes[1].node.handle_open_channel(&nodes[0].node.get_our_node_id(), &open_channel_msg);
8891                 let events = nodes[1].node.get_and_clear_pending_events();
8892                 match events[0] {
8893                         Event::OpenChannelRequest { temporary_channel_id, .. } => {
8894                                 nodes[1].node.force_close_broadcasting_latest_txn(&temporary_channel_id, &nodes[0].node.get_our_node_id()).unwrap();
8895                         }
8896                         _ => panic!("Unexpected event"),
8897                 }
8898
8899                 let error_msg = get_err_msg(&nodes[1], &nodes[0].node.get_our_node_id());
8900                 nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &error_msg);
8901
8902                 let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8903                 assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());
8904
8905                 check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
8906         }
8907 }
8908
8909 #[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
8910 pub mod bench {
8911         use crate::chain::Listen;
8912         use crate::chain::chainmonitor::{ChainMonitor, Persist};
8913         use crate::chain::keysinterface::{KeysManager, InMemorySigner};
8914         use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider};
8915         use crate::ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage, PaymentId, RecipientOnionFields, Retry};
8916         use crate::ln::functional_test_utils::*;
8917         use crate::ln::msgs::{ChannelMessageHandler, Init};
8918         use crate::routing::gossip::NetworkGraph;
8919         use crate::routing::router::{PaymentParameters, RouteParameters};
8920         use crate::util::test_utils;
8921         use crate::util::config::UserConfig;
8922
8923         use bitcoin::hashes::Hash;
8924         use bitcoin::hashes::sha256::Hash as Sha256;
8925         use bitcoin::{Block, BlockHeader, PackedLockTime, Transaction, TxMerkleNode, TxOut};
8926
8927         use crate::sync::{Arc, Mutex};
8928
8929         use test::Bencher;
8930
8931         type Manager<'a, P> = ChannelManager<
8932                 &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
8933                         &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
8934                         &'a test_utils::TestLogger, &'a P>,
8935                 &'a test_utils::TestBroadcaster, &'a KeysManager, &'a KeysManager, &'a KeysManager,
8936                 &'a test_utils::TestFeeEstimator, &'a test_utils::TestRouter<'a>,
8937                 &'a test_utils::TestLogger>;
8938
8939         struct ANodeHolder<'a, P: Persist<InMemorySigner>> {
8940                 node: &'a Manager<'a, P>,
8941         }
8942         impl<'a, P: Persist<InMemorySigner>> NodeHolder for ANodeHolder<'a, P> {
8943                 type CM = Manager<'a, P>;
8944                 #[inline]
8945                 fn node(&self) -> &Manager<'a, P> { self.node }
8946                 #[inline]
8947                 fn chain_monitor(&self) -> Option<&test_utils::TestChainMonitor> { None }
8948         }
8949
8950         #[cfg(test)]
8951         #[bench]
8952         fn bench_sends(bench: &mut Bencher) {
8953                 bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
8954         }
8955
8956         pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
8957                 // Do a simple benchmark of sending a payment back and forth between two nodes.
8958                 // Note that this is unrealistic as each payment send will require at least two fsync
8959                 // calls per node.
8960                 let network = bitcoin::Network::Testnet;
8961
8962                 let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
8963                 let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
8964                 let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
8965                 let scorer = Mutex::new(test_utils::TestScorer::new());
8966                 let router = test_utils::TestRouter::new(Arc::new(NetworkGraph::new(network, &logger_a)), &scorer);
8967
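                // A minimum_depth of 1 lets the channel become usable after the single block we
                // mine below, keeping the benchmark setup short.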
8968                 let mut config: UserConfig = Default::default();
8969                 config.channel_handshake_config.minimum_depth = 1;
8970
8971                 let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
8972                 let seed_a = [1u8; 32];
8973                 let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
8974                 let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &router, &logger_a, &keys_manager_a, &keys_manager_a, &keys_manager_a, config.clone(), ChainParameters {
8975                         network,
8976                         best_block: BestBlock::from_network(network),
8977                 });
8978                 let node_a_holder = ANodeHolder { node: &node_a };
8979
8980                 let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
8981                 let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_b, &fee_estimator, &persister_b);
8982                 let seed_b = [2u8; 32];
8983                 let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
8984                 let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &router, &logger_b, &keys_manager_b, &keys_manager_b, &keys_manager_b, config.clone(), ChainParameters {
8985                         network,
8986                         best_block: BestBlock::from_network(network),
8987                 });
8988                 let node_b_holder = ANodeHolder { node: &node_b };
8989
8990                 node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: node_b.init_features(), remote_network_address: None }, true).unwrap();
8991                 node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: node_a.init_features(), remote_network_address: None }, false).unwrap();
8992                 node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
8993                 node_b.handle_open_channel(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
8994                 node_a.handle_accept_channel(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
8995
8996                 let tx;
8997                 if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
8998                         tx = Transaction { version: 2, lock_time: PackedLockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8999                                 value: 8_000_000, script_pubkey: output_script,
9000                         }]};
9001                         node_a.funding_transaction_generated(&temporary_channel_id, &node_b.get_our_node_id(), tx.clone()).unwrap();
9002                 } else { panic!(); }
9003
9004                 node_b.handle_funding_created(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
9005                 let events_b = node_b.get_and_clear_pending_events();
9006                 assert_eq!(events_b.len(), 1);
9007                 match events_b[0] {
9008                         Event::ChannelPending{ ref counterparty_node_id, .. } => {
9009                                 assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
9010                         },
9011                         _ => panic!("Unexpected event"),
9012                 }
9013
9014                 node_a.handle_funding_signed(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
9015                 let events_a = node_a.get_and_clear_pending_events();
9016                 assert_eq!(events_a.len(), 1);
9017                 match events_a[0] {
9018                         Event::ChannelPending{ ref counterparty_node_id, .. } => {
9019                                 assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
9020                         },
9021                         _ => panic!("Unexpected event"),
9022                 }
9023
9024                 assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
9025
9026                 let block = Block {
9027                         header: BlockHeader { version: 0x20000000, prev_blockhash: BestBlock::from_network(network).block_hash(), merkle_root: TxMerkleNode::all_zeros(), time: 42, bits: 42, nonce: 42 },
9028                         txdata: vec![tx],
9029                 };
9030                 Listen::block_connected(&node_a, &block, 1);
9031                 Listen::block_connected(&node_b, &block, 1);
9032
9033                 node_a.handle_channel_ready(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendChannelReady, node_a.get_our_node_id()));
9034                 let msg_events = node_a.get_and_clear_pending_msg_events();
9035                 assert_eq!(msg_events.len(), 2);
9036                 match msg_events[0] {
9037                         MessageSendEvent::SendChannelReady { ref msg, .. } => {
9038                                 node_b.handle_channel_ready(&node_a.get_our_node_id(), msg);
9039                                 get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
9040                         },
9041                         _ => panic!(),
9042                 }
9043                 match msg_events[1] {
9044                         MessageSendEvent::SendChannelUpdate { .. } => {},
9045                         _ => panic!(),
9046                 }
9047
9048                 let events_a = node_a.get_and_clear_pending_events();
9049                 assert_eq!(events_a.len(), 1);
9050                 match events_a[0] {
9051                         Event::ChannelReady{ ref counterparty_node_id, .. } => {
9052                                 assert_eq!(*counterparty_node_id, node_b.get_our_node_id());
9053                         },
9054                         _ => panic!("Unexpected event"),
9055                 }
9056
9057                 let events_b = node_b.get_and_clear_pending_events();
9058                 assert_eq!(events_b.len(), 1);
9059                 match events_b[0] {
9060                         Event::ChannelReady{ ref counterparty_node_id, .. } => {
9061                                 assert_eq!(*counterparty_node_id, node_a.get_our_node_id());
9062                         },
9063                         _ => panic!("Unexpected event"),
9064                 }
9065
9066                 let mut payment_count: u64 = 0;
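                // Each send_payment! invocation drives a full HTLC round trip by hand: the
                // sender's update_add_htlc + commitment_signed, the revoke-and-ack/commitment
                // dance in both directions, the receiver claiming with the preimage, and the
                // fulfill/commitment dance back, ending with the sender's PaymentSent event.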
9067                 macro_rules! send_payment {
9068                         ($node_a: expr, $node_b: expr) => {
9069                                 let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id(), TEST_FINAL_CLTV)
9070                                         .with_features($node_b.invoice_features());
9071                                 let mut payment_preimage = PaymentPreimage([0; 32]);
9072                                 payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
9073                                 payment_count += 1;
9074                                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
9075                                 let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200, None).unwrap();
9076
9077                                 $node_a.send_payment(payment_hash, RecipientOnionFields::secret_only(payment_secret),
9078                                         PaymentId(payment_hash.0), RouteParameters {
9079                                                 payment_params, final_value_msat: 10_000,
9080                                         }, Retry::Attempts(0)).unwrap();
9081                                 let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
9082                                 $node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
9083                                 $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
9084                                 let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_b }, &$node_a.get_our_node_id());
9085                                 $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa);
9086                                 $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs);
9087                                 $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
9088
9089                                 expect_pending_htlcs_forwardable!(ANodeHolder { node: &$node_b });
9090                                 expect_payment_claimable!(ANodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
9091                                 $node_b.claim_funds(payment_preimage);
9092                                 expect_payment_claimed!(ANodeHolder { node: &$node_b }, payment_hash, 10_000);
9093
9094                                 match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
9095                                         MessageSendEvent::UpdateHTLCs { node_id, updates } => {
9096                                                 assert_eq!(node_id, $node_a.get_our_node_id());
9097                                                 $node_a.handle_update_fulfill_htlc(&$node_b.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
9098                                                 $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &updates.commitment_signed);
9099                                         },
9100                                         _ => panic!("Failed to generate claim event"),
9101                                 }
9102
9103                                 let (raa, cs) = get_revoke_commit_msgs(&ANodeHolder { node: &$node_a }, &$node_b.get_our_node_id());
9104                                 $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa);
9105                                 $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs);
9106                                 $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(ANodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
9107
9108                                 expect_payment_sent!(ANodeHolder { node: &$node_a }, payment_preimage);
9109                         }
9110                 }
9111
9112                 bench.iter(|| {
9113                         send_payment!(node_a, node_b);
9114                         send_payment!(node_b, node_a);
9115                 });
9116         }
9117 }