Rename HTLC `onchain_value_satoshis` to `htlc_value_satoshis`
[rust-lightning] / lightning / src / ln / channelmanager.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 //! The top-level channel management and payment tracking stuff lives here.
11 //!
12 //! The ChannelManager is the main chunk of logic implementing the lightning protocol and is
13 //! responsible for tracking which channels are open, which HTLCs are in flight, and reestablishing those
14 //! upon reconnect to the relevant peer(s).
15 //!
16 //! It does not manage routing logic (see routing::router::get_route for that) nor does it manage constructing
17 //! on-chain transactions (it only monitors the chain to watch for any force-closes that might
18 //! imply it needs to fail HTLCs/payments/channels it manages).
19 //!
20
21 use bitcoin::blockdata::block::BlockHeader;
22 use bitcoin::blockdata::transaction::Transaction;
23 use bitcoin::blockdata::constants::genesis_block;
24 use bitcoin::network::constants::Network;
25
26 use bitcoin::hashes::{Hash, HashEngine};
27 use bitcoin::hashes::sha256::Hash as Sha256;
28 use bitcoin::hashes::sha256d::Hash as Sha256dHash;
29 use bitcoin::hash_types::{BlockHash, Txid};
30
31 use bitcoin::secp256k1::{SecretKey,PublicKey};
32 use bitcoin::secp256k1::Secp256k1;
33 use bitcoin::secp256k1::ecdh::SharedSecret;
34 use bitcoin::secp256k1;
35
36 use chain;
37 use chain::{Confirm, ChannelMonitorUpdateErr, Watch, BestBlock};
38 use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator};
39 use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID};
40 use chain::transaction::{OutPoint, TransactionData};
41 // Since this struct is returned in `list_channels` methods, expose it here in case users want to
42 // construct one themselves.
43 use ln::{inbound_payment, PaymentHash, PaymentPreimage, PaymentSecret};
44 use ln::channel::{Channel, ChannelError, ChannelUpdateStatus, UpdateFulfillCommitFetch};
45 use ln::features::{ChannelTypeFeatures, InitFeatures, NodeFeatures};
46 use routing::router::{PaymentParameters, Route, RouteHop, RoutePath, RouteParameters};
47 use ln::msgs;
48 use ln::msgs::NetAddress;
49 use ln::onion_utils;
50 use ln::msgs::{ChannelMessageHandler, DecodeError, LightningError, MAX_VALUE_MSAT, OptionalField};
51 use ln::wire::Encode;
52 use chain::keysinterface::{Sign, KeysInterface, KeysManager, InMemorySigner, Recipient};
53 use util::config::UserConfig;
54 use util::events::{EventHandler, EventsProvider, MessageSendEvent, MessageSendEventsProvider, ClosureReason};
55 use util::{byte_utils, events};
56 use util::scid_utils::fake_scid;
57 use util::ser::{BigSize, FixedLengthReader, Readable, ReadableArgs, MaybeReadable, Writeable, Writer, VecWriter};
58 use util::logger::{Level, Logger};
59 use util::errors::APIError;
60
61 use io;
62 use prelude::*;
63 use core::{cmp, mem};
64 use core::cell::RefCell;
65 use io::Read;
66 use sync::{Arc, Condvar, Mutex, MutexGuard, RwLock, RwLockReadGuard};
67 use core::sync::atomic::{AtomicUsize, Ordering};
68 use core::time::Duration;
69 use core::ops::Deref;
70
71 #[cfg(any(test, feature = "std"))]
72 use std::time::Instant;
73 use util::crypto::sign;
74
75 // We hold various information about HTLC relay in the HTLC objects in Channel itself:
76 //
77 // Upon receipt of an HTLC from a peer, we'll give it a PendingHTLCStatus indicating if it should
78 // forward the HTLC with information it will give back to us when it does so, or if it should Fail
79 // the HTLC with the relevant message for the Channel to handle giving to the remote peer.
80 //
81 // Once said HTLC is committed in the Channel, if the PendingHTLCStatus indicated Forward, the
82 // Channel will return the PendingHTLCInfo back to us, and we will create an HTLCForwardInfo
83 // with it to track where it came from (in case of onwards-forward error), waiting a random delay
84 // before we forward it.
85 //
86 // We will then use HTLCForwardInfo's PendingHTLCInfo to construct an outbound HTLC, with a
87 // relevant HTLCSource::PreviousHopData filled in to indicate where it came from (which we can use
88 // to either fail-backwards or fulfill the HTLC backwards along the relevant path).
89 // Alternatively, we can fill an outbound HTLC with a HTLCSource::OutboundRoute indicating this is
90 // our payment, which we can use to decode errors or inform the user that the payment was sent.
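//
// As a hedged illustration (not the actual decode path; `can_forward`, `onion_packet` and the
// other locals below are hypothetical stand-ins for what onion decoding yields), the receipt
// step above boils down to producing one arm of PendingHTLCStatus:
//
//     let status = if can_forward {
//             PendingHTLCStatus::Forward(PendingHTLCInfo {
//                     routing: PendingHTLCRouting::Forward { onion_packet, short_channel_id },
//                     incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value,
//             })
//     } else {
//             PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(update_fail_htlc))
//     };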
91
92 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
93 pub(super) enum PendingHTLCRouting {
94         Forward {
95                 onion_packet: msgs::OnionPacket,
96                 short_channel_id: u64, // This should be NonZero<u64> eventually when we bump MSRV
97         },
98         Receive {
99                 payment_data: msgs::FinalOnionHopData,
100                 incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
101                 phantom_shared_secret: Option<[u8; 32]>,
102         },
103         ReceiveKeysend {
104                 payment_preimage: PaymentPreimage,
105                 incoming_cltv_expiry: u32, // Used to track when we should expire pending HTLCs that go unclaimed
106         },
107 }
108
109 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
110 pub(super) struct PendingHTLCInfo {
111         pub(super) routing: PendingHTLCRouting,
112         pub(super) incoming_shared_secret: [u8; 32],
113         payment_hash: PaymentHash,
114         pub(super) amt_to_forward: u64,
115         pub(super) outgoing_cltv_value: u32,
116 }
117
118 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
119 pub(super) enum HTLCFailureMsg {
120         Relay(msgs::UpdateFailHTLC),
121         Malformed(msgs::UpdateFailMalformedHTLC),
122 }
123
124 /// Stores whether we cannot forward an HTLC, or the relevant forwarding info if we can
125 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
126 pub(super) enum PendingHTLCStatus {
127         Forward(PendingHTLCInfo),
128         Fail(HTLCFailureMsg),
129 }
130
131 pub(super) enum HTLCForwardInfo {
132         AddHTLC {
133                 forward_info: PendingHTLCInfo,
134
135                 // These fields are produced in `forward_htlcs()` and consumed in
136                 // `process_pending_htlc_forwards()` for constructing the
137                 // `HTLCSource::PreviousHopData` for failed and forwarded
138                 // HTLCs.
139                 prev_short_channel_id: u64,
140                 prev_htlc_id: u64,
141                 prev_funding_outpoint: OutPoint,
142         },
143         FailHTLC {
144                 htlc_id: u64,
145                 err_packet: msgs::OnionErrorPacket,
146         },
147 }
148
149 /// Tracks the inbound corresponding to an outbound HTLC
150 #[derive(Clone, Hash, PartialEq, Eq)]
151 pub(crate) struct HTLCPreviousHopData {
152         short_channel_id: u64,
153         htlc_id: u64,
154         incoming_packet_shared_secret: [u8; 32],
155         phantom_shared_secret: Option<[u8; 32]>,
156
157         // This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards
158         // channel with a preimage provided by the forward channel.
159         outpoint: OutPoint,
160 }
161
162 enum OnionPayload {
163         /// Indicates this incoming onion payload is for the purpose of paying an invoice.
164         Invoice {
165                 /// This is only here for backwards-compatibility in serialization, in the future it can be
166                 /// removed, breaking clients running 0.0.106 and earlier.
167                 _legacy_hop_data: msgs::FinalOnionHopData,
168         },
169         /// Contains the payer-provided preimage.
170         Spontaneous(PaymentPreimage),
171 }
172
173 /// HTLCs that are to us and can be failed/claimed by the user
174 struct ClaimableHTLC {
175         prev_hop: HTLCPreviousHopData,
176         cltv_expiry: u32,
177         /// The amount (in msats) of this MPP part
178         value: u64,
179         onion_payload: OnionPayload,
180         timer_ticks: u8,
181         /// The sum total of all MPP parts
182         total_msat: u64,
183 }
184
185 /// A payment identifier used to uniquely identify a payment to LDK.
186 /// (C-not exported) as we just use [u8; 32] directly
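///
/// As an illustrative, non-normative convention, callers often reuse the payment hash bytes as
/// the identifier:
/// ```ignore
/// let payment_id = PaymentId(payment_hash.0);
/// ```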
187 #[derive(Hash, Copy, Clone, PartialEq, Eq, Debug)]
188 pub struct PaymentId(pub [u8; 32]);
189
190 impl Writeable for PaymentId {
191         fn write<W: Writer>(&self, w: &mut W) -> Result<(), io::Error> {
192                 self.0.write(w)
193         }
194 }
195
196 impl Readable for PaymentId {
197         fn read<R: Read>(r: &mut R) -> Result<Self, DecodeError> {
198                 let buf: [u8; 32] = Readable::read(r)?;
199                 Ok(PaymentId(buf))
200         }
201 }
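// A hedged round-trip sketch of the two impls above (test-style; `VecWriter` is the helper
// imported from util::ser):
//     let id = PaymentId([42; 32]);
//     let mut w = VecWriter(Vec::new());
//     id.write(&mut w).unwrap();
//     assert_eq!(PaymentId::read(&mut &w.0[..]).unwrap(), id);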
202 /// Tracks the inbound corresponding to an outbound HTLC
203 #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
204 #[derive(Clone, PartialEq, Eq)]
205 pub(crate) enum HTLCSource {
206         PreviousHopData(HTLCPreviousHopData),
207         OutboundRoute {
208                 path: Vec<RouteHop>,
209                 session_priv: SecretKey,
210                 /// Technically we can recalculate this from the route, but we cache it here to avoid
211                 /// doing a double-pass on route when we get a failure back
212                 first_hop_htlc_msat: u64,
213                 payment_id: PaymentId,
214                 payment_secret: Option<PaymentSecret>,
215                 payment_params: Option<PaymentParameters>,
216         },
217 }
218 #[allow(clippy::derive_hash_xor_eq)] // Our Hash is faithful to the data, we just don't have SecretKey::hash
219 impl core::hash::Hash for HTLCSource {
220         fn hash<H: core::hash::Hasher>(&self, hasher: &mut H) {
221                 match self {
222                         HTLCSource::PreviousHopData(prev_hop_data) => {
223                                 0u8.hash(hasher);
224                                 prev_hop_data.hash(hasher);
225                         },
226                         HTLCSource::OutboundRoute { path, session_priv, payment_id, payment_secret, first_hop_htlc_msat, payment_params } => {
227                                 1u8.hash(hasher);
228                                 path.hash(hasher);
229                                 session_priv[..].hash(hasher);
230                                 payment_id.hash(hasher);
231                                 payment_secret.hash(hasher);
232                                 first_hop_htlc_msat.hash(hasher);
233                                 payment_params.hash(hasher);
234                         },
235                 }
236         }
237 }
238 #[cfg(not(feature = "grind_signatures"))]
239 #[cfg(test)]
240 impl HTLCSource {
241         pub fn dummy() -> Self {
242                 HTLCSource::OutboundRoute {
243                         path: Vec::new(),
244                         session_priv: SecretKey::from_slice(&[1; 32]).unwrap(),
245                         first_hop_htlc_msat: 0,
246                         payment_id: PaymentId([2; 32]),
247                         payment_secret: None,
248                         payment_params: None,
249                 }
250         }
251 }
252
253 #[derive(Clone)] // See Channel::revoke_and_ack for why, tl;dr: Rust bug
254 pub(super) enum HTLCFailReason {
255         LightningError {
256                 err: msgs::OnionErrorPacket,
257         },
258         Reason {
259                 failure_code: u16,
260                 data: Vec<u8>,
261         }
262 }
263
264 struct ReceiveError {
265         err_code: u16,
266         err_data: Vec<u8>,
267         msg: &'static str,
268 }
269
270 /// Return value for claim_funds_from_hop
271 enum ClaimFundsFromHop {
272         PrevHopForceClosed,
273         MonitorUpdateFail(PublicKey, MsgHandleErrInternal, Option<u64>),
274         Success(u64),
275         DuplicateClaim,
276 }
277
278 type ShutdownResult = (Option<(OutPoint, ChannelMonitorUpdate)>, Vec<(HTLCSource, PaymentHash)>);
279
280 /// Error type returned across the channel_state mutex boundary. When an Err is generated for a
281 /// Channel, we generally end up with a ChannelError::Close for which we have to close the channel
282 /// immediately (ie with no further calls on it made). Thus, this step happens inside a
283 /// channel_state lock. We then return the set of things that need to be done outside the lock in
284 /// this struct and call handle_error!() on it.
285
286 struct MsgHandleErrInternal {
287         err: msgs::LightningError,
288         chan_id: Option<([u8; 32], u64)>, // If Some a channel of ours has been closed
289         shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
290 }
291 impl MsgHandleErrInternal {
292         #[inline]
293         fn send_err_msg_no_close(err: String, channel_id: [u8; 32]) -> Self {
294                 Self {
295                         err: LightningError {
296                                 err: err.clone(),
297                                 action: msgs::ErrorAction::SendErrorMessage {
298                                         msg: msgs::ErrorMessage {
299                                                 channel_id,
300                                                 data: err
301                                         },
302                                 },
303                         },
304                         chan_id: None,
305                         shutdown_finish: None,
306                 }
307         }
308         #[inline]
309         fn ignore_no_close(err: String) -> Self {
310                 Self {
311                         err: LightningError {
312                                 err,
313                                 action: msgs::ErrorAction::IgnoreError,
314                         },
315                         chan_id: None,
316                         shutdown_finish: None,
317                 }
318         }
319         #[inline]
320         fn from_no_close(err: msgs::LightningError) -> Self {
321                 Self { err, chan_id: None, shutdown_finish: None }
322         }
323         #[inline]
324         fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u64, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
325                 Self {
326                         err: LightningError {
327                                 err: err.clone(),
328                                 action: msgs::ErrorAction::SendErrorMessage {
329                                         msg: msgs::ErrorMessage {
330                                                 channel_id,
331                                                 data: err
332                                         },
333                                 },
334                         },
335                         chan_id: Some((channel_id, user_channel_id)),
336                         shutdown_finish: Some((shutdown_res, channel_update)),
337                 }
338         }
339         #[inline]
340         fn from_chan_no_close(err: ChannelError, channel_id: [u8; 32]) -> Self {
341                 Self {
342                         err: match err {
343                                 ChannelError::Warn(msg) =>  LightningError {
344                                         err: msg.clone(),
345                                         action: msgs::ErrorAction::SendWarningMessage {
346                                                 msg: msgs::WarningMessage {
347                                                         channel_id,
348                                                         data: msg
349                                                 },
350                                                 log_level: Level::Warn,
351                                         },
352                                 },
353                                 ChannelError::Ignore(msg) => LightningError {
354                                         err: msg,
355                                         action: msgs::ErrorAction::IgnoreError,
356                                 },
357                                 ChannelError::Close(msg) => LightningError {
358                                         err: msg.clone(),
359                                         action: msgs::ErrorAction::SendErrorMessage {
360                                                 msg: msgs::ErrorMessage {
361                                                         channel_id,
362                                                         data: msg
363                                                 },
364                                         },
365                                 },
366                                 ChannelError::CloseDelayBroadcast(msg) => LightningError {
367                                         err: msg.clone(),
368                                         action: msgs::ErrorAction::SendErrorMessage {
369                                                 msg: msgs::ErrorMessage {
370                                                         channel_id,
371                                                         data: msg
372                                                 },
373                                         },
374                                 },
375                         },
376                         chan_id: None,
377                         shutdown_finish: None,
378                 }
379         }
380 }
381
382 /// We hold back HTLCs we intend to relay for a random interval greater than this (see
383 /// Event::PendingHTLCsForwardable for the API guidelines indicating how long should be waited).
384 /// This provides some limited amount of privacy. Ideally this would range from somewhere like one
385 /// second to 30 seconds, but people expect lightning to be, you know, kinda fast, sadly.
386 const MIN_HTLC_RELAY_HOLDING_CELL_MILLIS: u64 = 100;
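//
// A hedged sketch of how an event handler might honor this constant (`time_forwardable` comes
// from Event::PendingHTLCsForwardable and is already at least this long; the exact delay and
// scheduling mechanism are up to the caller):
//     Event::PendingHTLCsForwardable { time_forwardable } => {
//             std::thread::sleep(time_forwardable); // or schedule asynchronously
//             channel_manager.process_pending_htlc_forwards();
//     },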
387
388 /// For events which result in both a RevokeAndACK and a CommitmentUpdate, by default they should
389 /// be sent in the order they appear in the return value, however sometimes the order needs to be
390 /// variable at runtime (eg Channel::channel_reestablish needs to re-send messages in the order
391 /// they were originally sent). In those cases, this enum is also returned.
392 #[derive(Clone, PartialEq)]
393 pub(super) enum RAACommitmentOrder {
394         /// Send the CommitmentUpdate messages first
395         CommitmentFirst,
396         /// Send the RevokeAndACK message first
397         RevokeAndACKFirst,
398 }
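//
// Illustrative dispatch only (messages are really queued via pending_msg_events): a caller
// holding `(raa, commitment_update, order)` would emit them as
//     match order {
//             RAACommitmentOrder::CommitmentFirst => { send(commitment_update); send(raa); },
//             RAACommitmentOrder::RevokeAndACKFirst => { send(raa); send(commitment_update); },
//     }
// where `send` is a hypothetical stand-in for pushing a MessageSendEvent for the peer.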
399
400 // Note this is only exposed in cfg(test):
401 pub(super) struct ChannelHolder<Signer: Sign> {
402         pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
403         /// SCIDs (and outbound SCID aliases) to the real channel id. Outbound SCID aliases are added
404         /// here once the channel is available for normal use, with SCIDs being added once the funding
405         /// transaction is confirmed at the channel's required confirmation depth.
406         pub(super) short_to_id: HashMap<u64, [u8; 32]>,
407         /// SCID/SCID Alias -> forward infos. Key of 0 means payments received.
408         ///
409         /// Note that because we may have an SCID Alias as the key we can have two entries per channel,
410         /// though in practice we probably won't be receiving HTLCs for a channel both via the alias
411         /// and via the classic SCID.
412         ///
413         /// Note that while this is held in the same mutex as the channels themselves, no consistency
414         /// guarantees are made about the existence of a channel with the short id here, nor the short
415         /// ids in the PendingHTLCInfo!
416         pub(super) forward_htlcs: HashMap<u64, Vec<HTLCForwardInfo>>,
417         /// Map from payment hash to any HTLCs which are to us and can be failed/claimed by the user.
418         /// Note that while this is held in the same mutex as the channels themselves, no consistency
419         /// guarantees are made about the channels given here actually existing anymore by the time you
420         /// go to read them!
421         claimable_htlcs: HashMap<PaymentHash, Vec<ClaimableHTLC>>,
422         /// Messages to send to peers - pushed to in the same lock that they are generated in (except
423         /// for broadcast messages, where ordering isn't as strict).
424         pub(super) pending_msg_events: Vec<MessageSendEvent>,
425 }
426
427 /// Events which we process internally but cannot be processed immediately at the generation site
428 /// for some reason. They are handled in timer_tick_occurred, so may be processed with
429 /// quite some time lag.
430 enum BackgroundEvent {
431         /// Handle a ChannelMonitorUpdate that closes a channel, broadcasting its current latest holder
432         /// commitment transaction.
433         ClosingMonitorUpdate((OutPoint, ChannelMonitorUpdate)),
434 }
435
436 /// State we hold per-peer. In the future we should put channels in here, but for now we only hold
437 /// the latest Init features we heard from the peer.
438 struct PeerState {
439         latest_features: InitFeatures,
440 }
441
442 /// Stores a PaymentSecret and any other data we may need to validate an inbound payment is
443 /// actually ours and not some duplicate HTLC sent to us by a node along the route.
444 ///
445 /// For users who don't want to bother doing their own payment preimage storage, we also store that
446 /// here.
447 ///
448 /// Note that this struct will be removed entirely soon, in favor of storing no inbound payment data
449 /// and instead encoding it in the payment secret.
450 struct PendingInboundPayment {
451         /// The payment secret that the sender must use for us to accept this payment
452         payment_secret: PaymentSecret,
453         /// Time at which this HTLC expires - blocks with a header time above this value will result in
454         /// this payment being removed.
455         expiry_time: u64,
456         /// Arbitrary identifier the user specifies (or not)
457         user_payment_id: u64,
458         // Other required attributes of the payment, optionally enforced:
459         payment_preimage: Option<PaymentPreimage>,
460         min_value_msat: Option<u64>,
461 }
462
463 /// Stores the session_priv for each part of a payment that is still pending. For versions 0.0.102
464 /// and later, also stores information for retrying the payment.
465 pub(crate) enum PendingOutboundPayment {
466         Legacy {
467                 session_privs: HashSet<[u8; 32]>,
468         },
469         Retryable {
470                 session_privs: HashSet<[u8; 32]>,
471                 payment_hash: PaymentHash,
472                 payment_secret: Option<PaymentSecret>,
473                 pending_amt_msat: u64,
474                 /// Used to track the fee paid. Only present if the payment was serialized on 0.0.103+.
475                 pending_fee_msat: Option<u64>,
476                 /// The total payment amount across all paths, used to verify that a retry is not overpaying.
477                 total_msat: u64,
478                 /// Our best known block height at the time this payment was initiated.
479                 starting_block_height: u32,
480         },
481         /// When a pending payment is fulfilled, we continue tracking it until all pending HTLCs have
482         /// been resolved. This ensures we don't look up pending payments in ChannelMonitors on restart
483         /// and add a pending payment that was already fulfilled.
484         Fulfilled {
485                 session_privs: HashSet<[u8; 32]>,
486                 payment_hash: Option<PaymentHash>,
487         },
488         /// When a payer gives up trying to retry a payment, they inform us, letting us generate a
489         /// `PaymentFailed` event when all HTLCs have irrevocably failed. This avoids a number of race
490         /// conditions in MPP-aware payment retriers (1), where the possibility of multiple
491         /// `PaymentPathFailed` events with `all_paths_failed` can be pending at once, confusing a
492         /// downstream event handler as to when a payment has actually failed.
493         ///
494         /// (1) https://github.com/lightningdevkit/rust-lightning/issues/1164
495         Abandoned {
496                 session_privs: HashSet<[u8; 32]>,
497                 payment_hash: PaymentHash,
498         },
499 }
500
501 impl PendingOutboundPayment {
502         fn is_retryable(&self) -> bool {
503                 match self {
504                         PendingOutboundPayment::Retryable { .. } => true,
505                         _ => false,
506                 }
507         }
508         fn is_fulfilled(&self) -> bool {
509                 match self {
510                         PendingOutboundPayment::Fulfilled { .. } => true,
511                         _ => false,
512                 }
513         }
514         fn abandoned(&self) -> bool {
515                 match self {
516                         PendingOutboundPayment::Abandoned { .. } => true,
517                         _ => false,
518                 }
519         }
520         fn get_pending_fee_msat(&self) -> Option<u64> {
521                 match self {
522                         PendingOutboundPayment::Retryable { pending_fee_msat, .. } => pending_fee_msat.clone(),
523                         _ => None,
524                 }
525         }
526
527         fn payment_hash(&self) -> Option<PaymentHash> {
528                 match self {
529                         PendingOutboundPayment::Legacy { .. } => None,
530                         PendingOutboundPayment::Retryable { payment_hash, .. } => Some(*payment_hash),
531                         PendingOutboundPayment::Fulfilled { payment_hash, .. } => *payment_hash,
532                         PendingOutboundPayment::Abandoned { payment_hash, .. } => Some(*payment_hash),
533                 }
534         }
535
536         fn mark_fulfilled(&mut self) {
537                 let mut session_privs = HashSet::new();
538                 core::mem::swap(&mut session_privs, match self {
539                         PendingOutboundPayment::Legacy { session_privs } |
540                         PendingOutboundPayment::Retryable { session_privs, .. } |
541                         PendingOutboundPayment::Fulfilled { session_privs, .. } |
542                         PendingOutboundPayment::Abandoned { session_privs, .. }
543                                 => session_privs,
544                 });
545                 let payment_hash = self.payment_hash();
546                 *self = PendingOutboundPayment::Fulfilled { session_privs, payment_hash };
547         }
548
549         fn mark_abandoned(&mut self) -> Result<(), ()> {
550                 let mut session_privs = HashSet::new();
551                 let our_payment_hash;
552                 core::mem::swap(&mut session_privs, match self {
553                         PendingOutboundPayment::Legacy { .. } |
554                         PendingOutboundPayment::Fulfilled { .. } =>
555                                 return Err(()),
556                         PendingOutboundPayment::Retryable { session_privs, payment_hash, .. } |
557                         PendingOutboundPayment::Abandoned { session_privs, payment_hash, .. } => {
558                                 our_payment_hash = *payment_hash;
559                                 session_privs
560                         },
561                 });
562                 *self = PendingOutboundPayment::Abandoned { session_privs, payment_hash: our_payment_hash };
563                 Ok(())
564         }
565
566         /// panics if path is None and !self.is_fulfilled
567         fn remove(&mut self, session_priv: &[u8; 32], path: Option<&Vec<RouteHop>>) -> bool {
568                 let remove_res = match self {
569                         PendingOutboundPayment::Legacy { session_privs } |
570                         PendingOutboundPayment::Retryable { session_privs, .. } |
571                         PendingOutboundPayment::Fulfilled { session_privs, .. } |
572                         PendingOutboundPayment::Abandoned { session_privs, .. } => {
573                                 session_privs.remove(session_priv)
574                         }
575                 };
576                 if remove_res {
577                         if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self {
578                                 let path = path.expect("Fulfilling a payment should always come with a path");
579                                 let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
580                                 *pending_amt_msat -= path_last_hop.fee_msat;
581                                 if let Some(fee_msat) = pending_fee_msat.as_mut() {
582                                         *fee_msat -= path.get_path_fees();
583                                 }
584                         }
585                 }
586                 remove_res
587         }
588
589         fn insert(&mut self, session_priv: [u8; 32], path: &Vec<RouteHop>) -> bool {
590                 let insert_res = match self {
591                         PendingOutboundPayment::Legacy { session_privs } |
592                         PendingOutboundPayment::Retryable { session_privs, .. } => {
593                                 session_privs.insert(session_priv)
594                         }
595                         PendingOutboundPayment::Fulfilled { .. } => false,
596                         PendingOutboundPayment::Abandoned { .. } => false,
597                 };
598                 if insert_res {
599                         if let PendingOutboundPayment::Retryable { ref mut pending_amt_msat, ref mut pending_fee_msat, .. } = self {
600                                 let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
601                                 *pending_amt_msat += path_last_hop.fee_msat;
602                                 if let Some(fee_msat) = pending_fee_msat.as_mut() {
603                                         *fee_msat += path.get_path_fees();
604                                 }
605                         }
606                 }
607                 insert_res
608         }
609
610         fn remaining_parts(&self) -> usize {
611                 match self {
612                         PendingOutboundPayment::Legacy { session_privs } |
613                         PendingOutboundPayment::Retryable { session_privs, .. } |
614                         PendingOutboundPayment::Fulfilled { session_privs, .. } |
615                         PendingOutboundPayment::Abandoned { session_privs, .. } => {
616                                 session_privs.len()
617                         }
618                 }
619         }
620 }
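//
// Hedged usage sketch of the state transitions above (fields elided; values hypothetical):
//     let mut payment = PendingOutboundPayment::Retryable { /* ... */ };
//     assert!(payment.is_retryable());
//     payment.mark_fulfilled();                    // Retryable -> Fulfilled
//     assert!(payment.is_fulfilled());
//     assert!(payment.mark_abandoned().is_err());  // Fulfilled payments are never abandoned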
621
622 /// SimpleArcChannelManager is useful when you need a ChannelManager with a static lifetime, e.g.
623 /// when you're using lightning-net-tokio (since tokio::spawn requires parameters with static
624 /// lifetimes). Other times you can afford a reference, which is more efficient, in which case
625 /// SimpleRefChannelManager is the more appropriate type. Defining these type aliases prevents
626 /// issues such as overly long function definitions. Note that the ChannelManager can take any
627 /// type that implements KeysInterface for its keys manager, but this type alias chooses the
628 /// concrete type of the KeysManager.
629 ///
630 /// (C-not exported) as Arcs don't make sense in bindings
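///
/// A type-level sketch, assuming user-provided `MyChainMonitor`, `MyBroadcaster`, `MyFeeEstimator`
/// and `MyLogger` types which satisfy the trait bounds on `ChannelManager`:
/// ```ignore
/// type MyManager = SimpleArcChannelManager<MyChainMonitor, MyBroadcaster, MyFeeEstimator, MyLogger>;
/// ```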
631 pub type SimpleArcChannelManager<M, T, F, L> = ChannelManager<InMemorySigner, Arc<M>, Arc<T>, Arc<KeysManager>, Arc<F>, Arc<L>>;
632
633 /// SimpleRefChannelManager is a type alias for a ChannelManager reference, and is the reference
634 /// counterpart to the SimpleArcChannelManager type alias. Use this type by default when you don't
635 /// need a ChannelManager with a static lifetime. You'll need a static lifetime in cases such as
636 /// usage of lightning-net-tokio (since tokio::spawn requires parameters with static lifetimes).
637 /// But if this is not necessary, using a reference is more efficient. Defining these type aliases
638 /// helps with issues such as long function definitions. Note that the ChannelManager can take any
639 /// type that implements KeysInterface for its keys manager, but this type alias chooses the
640 /// concrete type of the KeysManager.
641 ///
642 /// (C-not exported) as Arcs don't make sense in bindings
643 pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManager<InMemorySigner, &'a M, &'b T, &'c KeysManager, &'d F, &'e L>;
644
645 /// Manager which keeps track of a number of channels and sends messages to the appropriate
646 /// channel, also tracking HTLC preimages and forwarding onion packets appropriately.
647 ///
648 /// Implements ChannelMessageHandler, handling the multi-channel parts and passing things through
649 /// to individual Channels.
650 ///
651 /// Implements Writeable to write out all channel state to disk. Implies peer_disconnected() for
652 /// all peers during write/read (though does not modify this instance, only the instance being
653 /// serialized). This will result in any channels which have not yet exchanged funding_created (ie
654 /// called funding_transaction_generated for outbound channels) being closed.
655 ///
656 /// Note that you can be a bit lazier about writing out ChannelManager than you can be with
657 /// ChannelMonitors. With ChannelMonitors you MUST write each monitor update out to disk before
658 /// returning from chain::Watch::watch_/update_channel, with ChannelManagers, writing updates
659 /// happens out-of-band (and will prevent any other ChannelManager operations from occurring during
660 /// the serialization process). If the deserialized version is out-of-date compared to the
661 /// ChannelMonitors passed by reference to read(), those channels will be force-closed based on the
662 /// ChannelMonitor state and no funds will be lost (mod on-chain transaction fees).
663 ///
664 /// Note that the deserializer is only implemented for (BlockHash, ChannelManager), which
665 /// tells you the last block hash which was block_connect()ed. You MUST rescan any blocks along
666 /// the "reorg path" (ie call block_disconnected() until you get to a common block and then call
667 /// block_connected() to step towards your best block) upon deserialization before using the
668 /// object!
669 ///
670 /// Note that ChannelManager is responsible for tracking liveness of its channels and generating
671 /// ChannelUpdate messages informing peers that the channel is temporarily disabled. To avoid
672 /// spam due to quick disconnection/reconnection, updates are not sent until the channel has been
673 /// offline for a full minute. In order to track this, you must call
674 /// timer_tick_occurred roughly once per minute, though it doesn't have to be perfect.
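///
/// A hedged sketch of driving that timer by hand (lightning-background-processor normally does
/// this for you):
/// ```ignore
/// loop {
///     std::thread::sleep(Duration::from_secs(60));
///     channel_manager.timer_tick_occurred();
/// }
/// ```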
675 ///
676 /// Rather than using a plain ChannelManager, it is preferable to use either a SimpleArcChannelManager
677 /// a SimpleRefChannelManager, for conciseness. See their documentation for more details, but
678 /// essentially you should default to using a SimpleRefChannelManager, and use a
679 /// SimpleArcChannelManager when you require a ChannelManager with a static lifetime, such as when
680 /// you're using lightning-net-tokio.
681 pub struct ChannelManager<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
682         where M::Target: chain::Watch<Signer>,
683         T::Target: BroadcasterInterface,
684         K::Target: KeysInterface<Signer = Signer>,
685         F::Target: FeeEstimator,
686         L::Target: Logger,
687 {
688         default_configuration: UserConfig,
689         genesis_hash: BlockHash,
690         fee_estimator: F,
691         chain_monitor: M,
692         tx_broadcaster: T,
693
694         #[cfg(test)]
695         pub(super) best_block: RwLock<BestBlock>,
696         #[cfg(not(test))]
697         best_block: RwLock<BestBlock>,
698         secp_ctx: Secp256k1<secp256k1::All>,
699
700         #[cfg(any(test, feature = "_test_utils"))]
701         pub(super) channel_state: Mutex<ChannelHolder<Signer>>,
702         #[cfg(not(any(test, feature = "_test_utils")))]
703         channel_state: Mutex<ChannelHolder<Signer>>,
704
705         /// Storage for PaymentSecrets and any requirements on future inbound payments before we will
706         /// expose them to users via a PaymentReceived event. HTLCs which do not meet the requirements
707         /// here are failed when we process them as pending-forwardable-HTLCs, and entries are removed
708         /// after we generate a PaymentReceived upon receipt of all MPP parts or when they time out.
709         /// Locked *after* channel_state.
710         pending_inbound_payments: Mutex<HashMap<PaymentHash, PendingInboundPayment>>,
711
712         /// The session_priv bytes and retry metadata of outbound payments which are pending resolution.
713         /// The authoritative state of these HTLCs resides either within Channels or ChannelMonitors
714         /// (if the channel has been force-closed), however we track them here to prevent duplicative
715         /// PaymentSent/PaymentPathFailed events. Specifically, in the case of a duplicative
716         /// update_fulfill_htlc message after a reconnect, we may "claim" a payment twice.
717         /// Additionally, because ChannelMonitors are often not re-serialized after connecting block(s)
718         /// which may generate a claim event, we may receive similar duplicate claim/fail MonitorEvents
719         /// after reloading from disk while replaying blocks against ChannelMonitors.
720         ///
721         /// See `PendingOutboundPayment` documentation for more info.
722         ///
723         /// Locked *after* channel_state.
724         pending_outbound_payments: Mutex<HashMap<PaymentId, PendingOutboundPayment>>,
725
726         /// The set of outbound SCID aliases across all our channels, including unconfirmed channels
727         /// and some closed channels which reached a usable state prior to being closed. This is used
728         /// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
729         /// active channel list on load.
730         outbound_scid_aliases: Mutex<HashSet<u64>>,
731
732         our_network_key: SecretKey,
733         our_network_pubkey: PublicKey,
734
735         inbound_payment_key: inbound_payment::ExpandedKey,
736
737         /// LDK puts the [fake scids] that it generates into namespaces, to identify the type of an
738         /// incoming payment. To make it harder for a third-party to identify the type of a payment,
739         /// we encrypt the namespace identifier using these bytes.
740         ///
741         /// [fake scids]: crate::util::scid_utils::fake_scid
742         fake_scid_rand_bytes: [u8; 32],
743
744         /// Used to track the last value sent in a node_announcement "timestamp" field. We ensure this
745         /// value increases strictly since we don't assume access to a time source.
746         last_node_announcement_serial: AtomicUsize,
747
748         /// The highest block timestamp we've seen, which is usually a good guess at the current time.
749         /// Assuming most miners are generating blocks with reasonable timestamps, this shouldn't be
750         /// very far in the past, and can only ever be up to two hours in the future.
751         highest_seen_timestamp: AtomicUsize,
752
753         /// The bulk of our storage will eventually be here (channels and message queues and the like).
754         /// If we are connected to a peer we always at least have an entry here, even if no channels
755         /// are currently open with that peer.
756         /// Because adding or removing an entry is rare, we usually take an outer read lock and then
757         /// operate on the inner value freely. Sadly, this prevents parallel operation when opening a
758         /// new channel.
759         ///
760         /// If also holding `channel_state` lock, must lock `channel_state` prior to `per_peer_state`.
761         per_peer_state: RwLock<HashMap<PublicKey, Mutex<PeerState>>>,
762
763         pending_events: Mutex<Vec<events::Event>>,
764         pending_background_events: Mutex<Vec<BackgroundEvent>>,
765         /// Used when we have to take a BIG lock to make sure everything is self-consistent.
766         /// Essentially just when we're serializing ourselves out.
767         /// Taken first everywhere where we are making changes before any other locks.
768         /// When acquiring this lock in read mode, rather than acquiring it directly, call
769         /// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
770         /// PersistenceNotifier the lock contains sends out a notification when the lock is released.
771         total_consistency_lock: RwLock<()>,
772
773         persistence_notifier: PersistenceNotifier,
774
775         keys_manager: K,
776
777         logger: L,
778 }
779
780 /// Chain-related parameters used to construct a new `ChannelManager`.
781 ///
782 /// Typically, the block-specific parameters are derived from the best block hash for the network,
783 /// as a newly constructed `ChannelManager` will not have created any channels yet. These parameters
784 /// are not needed when deserializing a previously constructed `ChannelManager`.
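///
/// A hedged construction sketch for a fresh mainnet `ChannelManager`, assuming the caller already
/// knows the current chain tip (`tip_hash`, `tip_height`):
/// ```ignore
/// let chain_params = ChainParameters {
///     network: Network::Bitcoin,
///     best_block: BestBlock::new(tip_hash, tip_height),
/// };
/// ```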
785 #[derive(Clone, Copy, PartialEq)]
786 pub struct ChainParameters {
787         /// The network for determining the `chain_hash` in Lightning messages.
788         pub network: Network,
789
790         /// The hash and height of the latest block successfully connected.
791         ///
792         /// Used to track on-chain channel funding outputs and send payments with reliable timelocks.
793         pub best_block: BestBlock,
794 }
795
796 #[derive(Copy, Clone, PartialEq)]
797 enum NotifyOption {
798         DoPersist,
799         SkipPersist,
800 }
801
802 /// Whenever we release the `ChannelManager`'s `total_consistency_lock`, from read mode, it is
803 /// desirable to notify any listeners on `await_persistable_update_timeout`/
804 /// `await_persistable_update` when new updates are available for persistence. Therefore, this
805 /// struct is responsible for locking the total consistency lock and, upon going out of scope,
806 /// sending the aforementioned notification (since the lock being released indicates that the
807 /// updates are ready for persistence).
808 ///
809 /// We allow callers to either always notify by constructing with `notify_on_drop` or choose to
810 /// notify or not based on whether relevant changes have been made, providing a closure to
811 /// `optionally_notify` which returns a `NotifyOption`.
812 struct PersistenceNotifierGuard<'a, F: Fn() -> NotifyOption> {
813         persistence_notifier: &'a PersistenceNotifier,
814         should_persist: F,
815         // We hold onto this result so the lock doesn't get released immediately.
816         _read_guard: RwLockReadGuard<'a, ()>,
817 }
818
819 impl<'a> PersistenceNotifierGuard<'a, fn() -> NotifyOption> { // We don't care what the concrete F is here, it's unused
820         fn notify_on_drop(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier) -> PersistenceNotifierGuard<'a, impl Fn() -> NotifyOption> {
821                 PersistenceNotifierGuard::optionally_notify(lock, notifier, || -> NotifyOption { NotifyOption::DoPersist })
822         }
823
824         fn optionally_notify<F: Fn() -> NotifyOption>(lock: &'a RwLock<()>, notifier: &'a PersistenceNotifier, persist_check: F) -> PersistenceNotifierGuard<'a, F> {
825                 let read_guard = lock.read().unwrap();
826
827                 PersistenceNotifierGuard {
828                         persistence_notifier: notifier,
829                         should_persist: persist_check,
830                         _read_guard: read_guard,
831                 }
832         }
833 }
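//
// A hedged sketch of the intended call pattern (`made_changes` is a hypothetical flag; real
// callers do their work inside the closure and return the appropriate NotifyOption from it):
//     let _guard = PersistenceNotifierGuard::optionally_notify(
//             &self.total_consistency_lock, &self.persistence_notifier, || {
//                     // ... mutate channel state here ...
//                     if made_changes { NotifyOption::DoPersist } else { NotifyOption::SkipPersist }
//             });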
834
835 impl<'a, F: Fn() -> NotifyOption> Drop for PersistenceNotifierGuard<'a, F> {
836         fn drop(&mut self) {
837                 if (self.should_persist)() == NotifyOption::DoPersist {
838                         self.persistence_notifier.notify();
839                 }
840         }
841 }
842
843 /// The amount of time in blocks we require our counterparty to wait to claim their money (ie time
844 /// between when we, or our watchtower, must check for them having broadcast a theft transaction).
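///
/// At the default of 6 * 24 = 144 blocks this is roughly one day, assuming ten-minute block times.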
845 ///
846 /// This can be increased (but not decreased) through [`ChannelHandshakeConfig::our_to_self_delay`]
847 ///
848 /// [`ChannelHandshakeConfig::our_to_self_delay`]: crate::util::config::ChannelHandshakeConfig::our_to_self_delay
849 pub const BREAKDOWN_TIMEOUT: u16 = 6 * 24;
850 /// The amount of time in blocks we're willing to wait to claim money back to us. This matches
851 /// the maximum required amount in lnd as of March 2021.
852 pub(crate) const MAX_LOCAL_BREAKDOWN_TIMEOUT: u16 = 2 * 6 * 24 * 7;
853
854 /// The minimum number of blocks between an inbound HTLC's CLTV and the corresponding outbound
855 /// HTLC's CLTV. The current default represents roughly seven hours of blocks at six blocks/hour.
856 ///
857 /// This can be increased (but not decreased) through [`ChannelConfig::cltv_expiry_delta`]
858 ///
859 /// [`ChannelConfig::cltv_expiry_delta`]: crate::util::config::ChannelConfig::cltv_expiry_delta
860 // This should always be a few blocks greater than channelmonitor::CLTV_CLAIM_BUFFER,
861 // i.e. the node we forwarded the payment on to should always have enough room to reliably time out
862 // the HTLC via a full update_fail_htlc/commitment_signed dance before we hit the
863 // CLTV_CLAIM_BUFFER point (we static assert that it's at least 3 blocks more).
864 pub const MIN_CLTV_EXPIRY_DELTA: u16 = 6*7;
865 pub(super) const CLTV_FAR_FAR_AWAY: u32 = 6 * 24 * 7; //TODO?
866
867 /// Minimum CLTV difference between the current block height and received inbound payments.
868 /// Invoices generated for payment to us must set their `min_final_cltv_expiry` field to at least
869 /// this value.
870 // Note that we fail if exactly HTLC_FAIL_BACK_BUFFER + 1 was used, so we need to add one for
871 // any payments to succeed. Further, we don't want payments to fail if a block was found while
872 // a payment was being routed, so we add an extra block to be safe.
873 pub const MIN_FINAL_CLTV_EXPIRY: u32 = HTLC_FAIL_BACK_BUFFER + 3;
874
875 // Check that our CLTV_EXPIRY is at least CLTV_CLAIM_BUFFER + ANTI_REORG_DELAY + LATENCY_GRACE_PERIOD_BLOCKS,
876 // ie that if the next-hop peer fails the HTLC within
877 // LATENCY_GRACE_PERIOD_BLOCKS then we'll still have CLTV_CLAIM_BUFFER left to timeout it onchain,
878 // then waiting ANTI_REORG_DELAY to be reorg-safe on the outbound HTLC and
879 // failing the corresponding htlc backward, and us now seeing the last block of ANTI_REORG_DELAY before
880 // LATENCY_GRACE_PERIOD_BLOCKS.
881 #[deny(const_err)]
882 #[allow(dead_code)]
883 const CHECK_CLTV_EXPIRY_SANITY: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - CLTV_CLAIM_BUFFER - ANTI_REORG_DELAY - LATENCY_GRACE_PERIOD_BLOCKS;
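// (This is effectively a compile-time assertion: if MIN_CLTV_EXPIRY_DELTA were too small, the u32
// subtraction above would underflow, const evaluation would fail, and the build would error out.)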
884
885 // Check for ability of an attacker to make us fail on-chain by delaying an HTLC claim. See
886 // ChannelMonitor::should_broadcast_holder_commitment_txn for a description of why this is needed.
887 #[deny(const_err)]
888 #[allow(dead_code)]
889 const CHECK_CLTV_EXPIRY_SANITY_2: u32 = MIN_CLTV_EXPIRY_DELTA as u32 - LATENCY_GRACE_PERIOD_BLOCKS - 2*CLTV_CLAIM_BUFFER;
890
891 /// The number of blocks before we consider an outbound payment for expiry if it doesn't have any
892 /// pending HTLCs in flight.
893 pub(crate) const PAYMENT_EXPIRY_BLOCKS: u32 = 3;
894
895 /// The number of ticks of [`ChannelManager::timer_tick_occurred`] until expiry of incomplete MPPs
896 pub(crate) const MPP_TIMEOUT_TICKS: u8 = 3;
897
898 /// Information needed for constructing an invoice route hint for this channel.
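///
/// For reference, the fee this counterparty expects for forwarding an HTLC of `amt_msat` towards
/// us is, per BOLT 7, `fee_base_msat + amt_msat * fee_proportional_millionths / 1_000_000`.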
899 #[derive(Clone, Debug, PartialEq)]
900 pub struct CounterpartyForwardingInfo {
901         /// Base routing fee in millisatoshis.
902         pub fee_base_msat: u32,
903         /// Amount in millionths of a satoshi the channel will charge per transferred satoshi.
904         pub fee_proportional_millionths: u32,
905 /// The minimum difference in cltv_expiry between an inbound HTLC and its outgoing counterpart,
906         /// such that the outgoing HTLC is forwardable to this counterparty. See `msgs::ChannelUpdate`'s
907         /// `cltv_expiry_delta` for more details.
908         pub cltv_expiry_delta: u16,
909 }
910
911 /// Channel parameters which apply to our counterparty. These are split out from [`ChannelDetails`]
912 /// to better separate parameters.
913 #[derive(Clone, Debug, PartialEq)]
914 pub struct ChannelCounterparty {
915         /// The node_id of our counterparty
916         pub node_id: PublicKey,
917         /// The Features the channel counterparty provided upon last connection.
918         /// Useful for routing as it is the most up-to-date copy of the counterparty's features and
919         /// many routing-relevant features are present in the init context.
920         pub features: InitFeatures,
921         /// The value, in satoshis, that must always be held in the channel for our counterparty. This
922         /// value ensures that if our counterparty broadcasts a revoked state, we can punish them by
923         /// claiming at least this value on chain.
924         ///
925         /// This value is not included in [`inbound_capacity_msat`] as it can never be spent.
926         ///
927         /// [`inbound_capacity_msat`]: ChannelDetails::inbound_capacity_msat
928         pub unspendable_punishment_reserve: u64,
929         /// Information on the fees and requirements that the counterparty requires when forwarding
930         /// payments to us through this channel.
931         pub forwarding_info: Option<CounterpartyForwardingInfo>,
932         /// The smallest value HTLC (in msat) the remote peer will accept, for this channel. This field
933         /// is only `None` before we have received either the `OpenChannel` or `AcceptChannel` message
934         /// from the remote peer, or for `ChannelCounterparty` objects serialized prior to LDK 0.0.107.
935         pub outbound_htlc_minimum_msat: Option<u64>,
936         /// The largest value HTLC (in msat) the remote peer currently will accept, for this channel.
937         pub outbound_htlc_maximum_msat: Option<u64>,
938 }
939
940 /// Details of a channel, as returned by ChannelManager::list_channels and ChannelManager::list_usable_channels
941 #[derive(Clone, Debug, PartialEq)]
942 pub struct ChannelDetails {
943         /// The channel's ID (prior to funding transaction generation, this is a random 32 bytes,
944 /// thereafter this is the txid of the funding transaction XORed with the funding transaction output index).
945         /// Note that this means this value is *not* persistent - it can change once during the
946         /// lifetime of the channel.
947         pub channel_id: [u8; 32],
948         /// Parameters which apply to our counterparty. See individual fields for more information.
949         pub counterparty: ChannelCounterparty,
950         /// The Channel's funding transaction output, if we've negotiated the funding transaction with
951         /// our counterparty already.
952         ///
953         /// Note that, if this has been set, `channel_id` will be equivalent to
954         /// `funding_txo.unwrap().to_channel_id()`.
955         pub funding_txo: Option<OutPoint>,
956         /// The features which this channel operates with. See individual features for more info.
957         ///
958         /// `None` until negotiation completes and the channel type is finalized.
959         pub channel_type: Option<ChannelTypeFeatures>,
960         /// The position of the funding transaction in the chain. None if the funding transaction has
961         /// not yet been confirmed and the channel fully opened.
962         ///
963         /// Note that if [`inbound_scid_alias`] is set, it must be used for invoices and inbound
964         /// payments instead of this. See [`get_inbound_payment_scid`].
965         ///
966         /// [`inbound_scid_alias`]: Self::inbound_scid_alias
967         /// [`get_inbound_payment_scid`]: Self::get_inbound_payment_scid
968         pub short_channel_id: Option<u64>,
969         /// An optional [`short_channel_id`] alias for this channel, randomly generated by our
970         /// counterparty and usable in place of [`short_channel_id`] in invoice route hints. Our
971         /// counterparty will recognize the alias provided here in place of the [`short_channel_id`]
972         /// when they see a payment to be routed to us.
973         ///
974         /// Our counterparty may choose to rotate this value at any time, though will always recognize
975         /// previous values for inbound payment forwarding.
976         ///
977         /// [`short_channel_id`]: Self::short_channel_id
978         pub inbound_scid_alias: Option<u64>,
979         /// The value, in satoshis, of this channel as appears in the funding output
980         pub channel_value_satoshis: u64,
981         /// The value, in satoshis, that must always be held in the channel for us. This value ensures
982         /// that if we broadcast a revoked state, our counterparty can punish us by claiming at least
983         /// this value on chain.
984         ///
985         /// This value is not included in [`outbound_capacity_msat`] as it can never be spent.
986         ///
987         /// This value will be `None` for outbound channels until the counterparty accepts the channel.
988         ///
989         /// [`outbound_capacity_msat`]: ChannelDetails::outbound_capacity_msat
990         pub unspendable_punishment_reserve: Option<u64>,
991         /// The `user_channel_id` passed in to create_channel, or 0 if the channel was inbound.
992         pub user_channel_id: u64,
993         /// Our total balance.  This is the amount we would get if we close the channel.
994         /// This value is not exact. Due to various in-flight changes and feerate changes, exactly this
995         /// amount is not likely to be recoverable on close.
996         ///
997         /// This does not include any pending HTLCs which are not yet fully resolved (and, thus, whose
998         /// balance is not available for inclusion in new outbound HTLCs). This further does not include
999         /// any pending outgoing HTLCs which are awaiting some other resolution to be sent.
1000         /// This does not consider any on-chain fees.
1001         ///
1002         /// See also [`ChannelDetails::outbound_capacity_msat`]
1003         pub balance_msat: u64,
1004         /// The available outbound capacity for sending HTLCs to the remote peer. This does not include
1005         /// any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
1006         /// available for inclusion in new outbound HTLCs). This further does not include any pending
1007         /// outgoing HTLCs which are awaiting some other resolution to be sent.
1008         ///
1009         /// See also [`ChannelDetails::balance_msat`]
1010         ///
1011         /// This value is not exact. Due to various in-flight changes, feerate changes, and our
1012         /// conflict-avoidance policy, exactly this amount is not likely to be spendable. However, we
1013         /// should be able to spend nearly this amount.
1014         pub outbound_capacity_msat: u64,
1015         /// The available outbound capacity for sending a single HTLC to the remote peer. This is
1016         /// similar to [`ChannelDetails::outbound_capacity_msat`] but it may be further restricted by
1017         /// the current state and per-HTLC limit(s). This is intended for use when routing, allowing us
1018         /// to use a limit as close as possible to the HTLC limit we can currently send.
1019         ///
1020         /// See also [`ChannelDetails::balance_msat`] and [`ChannelDetails::outbound_capacity_msat`].
1021         pub next_outbound_htlc_limit_msat: u64,
1022         /// The available inbound capacity for the remote peer to send HTLCs to us. This does not
1023         /// include any pending HTLCs which are not yet fully resolved (and, thus, whose balance is not
1024         /// available for inclusion in new inbound HTLCs).
1025         /// Note that there are some corner cases not fully handled here, so the actual available
1026         /// inbound capacity may be slightly higher than this.
1027         ///
1028         /// This value is not exact. Due to various in-flight changes, feerate changes, and our
1029         /// counterparty's conflict-avoidance policy, exactly this amount is not likely to be spendable.
1030         /// However, our counterparty should be able to spend nearly this amount.
1031         pub inbound_capacity_msat: u64,
1032         /// The number of required confirmations on the funding transaction before the funding will be
1033         /// considered "locked". This number is selected by the channel fundee (i.e. us if
1034         /// [`is_outbound`] is *not* set), and can be selected for inbound channels with
1035         /// [`ChannelHandshakeConfig::minimum_depth`] or limited for outbound channels with
1036         /// [`ChannelHandshakeLimits::max_minimum_depth`].
1037         ///
1038         /// This value will be `None` for outbound channels until the counterparty accepts the channel.
1039         ///
1040         /// [`is_outbound`]: ChannelDetails::is_outbound
1041         /// [`ChannelHandshakeConfig::minimum_depth`]: crate::util::config::ChannelHandshakeConfig::minimum_depth
1042         /// [`ChannelHandshakeLimits::max_minimum_depth`]: crate::util::config::ChannelHandshakeLimits::max_minimum_depth
1043         pub confirmations_required: Option<u32>,
1044         /// The number of blocks (after our commitment transaction confirms) that we will need to wait
1045         /// until we can claim our funds after we force-close the channel. During this time our
1046         /// counterparty is allowed to punish us if we broadcasted a stale state. If our counterparty
1047         /// force-closes the channel and broadcasts a commitment transaction we do not have to wait any
1048         /// time to claim our non-HTLC-encumbered funds.
1049         ///
1050         /// This value will be `None` for outbound channels until the counterparty accepts the channel.
1051         pub force_close_spend_delay: Option<u16>,
1052         /// True if the channel was initiated (and thus funded) by us.
1053         pub is_outbound: bool,
1054         /// True if the channel is confirmed, funding_locked messages have been exchanged, and the
1055         /// channel is not currently being shut down. `funding_locked` message exchange implies the
1056         /// required confirmation count has been reached (and we were connected to the peer at some
1057         /// point after the funding transaction received enough confirmations). The required
1058         /// confirmation count is provided in [`confirmations_required`].
1059         ///
1060         /// [`confirmations_required`]: ChannelDetails::confirmations_required
1061         pub is_funding_locked: bool,
1062         /// True if the channel is (a) confirmed and funding_locked messages have been exchanged, (b)
1063         /// the peer is connected, and (c) the channel is not currently negotiating a shutdown.
1064         ///
1065         /// The criteria for this to be true are a strict superset of those for `is_funding_locked`.
1066         pub is_usable: bool,
1067         /// True if this channel is (or will be) publicly-announced.
1068         pub is_public: bool,
1069         /// The smallest value HTLC (in msat) we will accept, for this channel. This field
1070         /// is only `None` for `ChannelDetails` objects serialized prior to LDK 0.0.107
1071         pub inbound_htlc_minimum_msat: Option<u64>,
1072         /// The largest value HTLC (in msat) we currently will accept, for this channel.
1073         pub inbound_htlc_maximum_msat: Option<u64>,
1074 }
1075
1076 impl ChannelDetails {
1077         /// Gets the current SCID which should be used to identify this channel for inbound payments.
1078         /// This should be used for providing invoice hints or in any other context where our
1079         /// counterparty will forward a payment to us.
1080         ///
1081         /// This is either the [`ChannelDetails::inbound_scid_alias`], if set, or the
1082         /// [`ChannelDetails::short_channel_id`]. See those for more information.
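        ///
        /// A minimal, hedged usage sketch (assumes an initialized `channel_manager`; the names used
        /// here are illustrative, not part of this method's contract):
        ///
        /// ```ignore
        /// // Collect one inbound-payment SCID per usable channel, e.g. for invoice route hints.
        /// let hint_scids: Vec<u64> = channel_manager.list_usable_channels()
        ///     .iter()
        ///     .filter_map(|chan| chan.get_inbound_payment_scid())
        ///     .collect();
        /// ```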
1083         pub fn get_inbound_payment_scid(&self) -> Option<u64> {
1084                 self.inbound_scid_alias.or(self.short_channel_id)
1085         }
1086 }
1087
1088 /// If a payment fails to send, it can be in one of several states. This enum is returned as the
1089 /// Err() type describing which state the payment is in; see the description of the individual enum
1090 /// variants for more.
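///
/// A hedged sketch of how a caller might branch on these variants when deciding whether to
/// retry; the exact `send_payment` signature and the `route`/`payment_hash`/`payment_secret`
/// values are assumptions for illustration only:
///
/// ```ignore
/// match channel_manager.send_payment(&route, payment_hash, &payment_secret) {
///     Ok(_payment_id) => { /* the payment is now pending */ },
///     Err(PaymentSendFailure::ParameterError(_)) |
///     Err(PaymentSendFailure::PathParameterError(_)) => { /* fix the parameter, retry in full */ },
///     Err(PaymentSendFailure::AllFailedRetrySafe(_)) => { /* safe to retry, ideally on new paths */ },
///     Err(PaymentSendFailure::PartialFailure { failed_paths_retry, .. }) => {
///         // Only the paths which failed without committing may be retried, e.g. by finding a
///         // fresh route from the returned `failed_paths_retry` parameters.
///     },
/// }
/// ```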
1091 #[derive(Clone, Debug)]
1092 pub enum PaymentSendFailure {
1093         /// A parameter which was passed to send_payment was invalid, preventing us from attempting to
1094         /// send the payment at all. No channel state has been changed or messages sent to peers, and
1095         /// once you've corrected the offending parameter, you can freely retry the payment in full.
1096         ParameterError(APIError),
1097         /// A parameter in a single path which was passed to send_payment was invalid, preventing us
1098         /// from attempting to send the payment at all. No channel state has been changed or messages
1099         /// sent to peers, and once you've corrected the offending parameter, you can freely retry the
1100         /// payment in full.
1101         ///
1102         /// The results here are ordered the same as the paths in the route object which was passed to
1103         /// send_payment.
1104         PathParameterError(Vec<Result<(), APIError>>),
1105         /// All paths which were attempted failed to send, with no channel state change taking place.
1106         /// You can freely retry the payment in full (though you probably want to do so over different
1107         /// paths than the ones selected).
1108         AllFailedRetrySafe(Vec<APIError>),
1109         /// Some paths which were attempted failed to send, though possibly not all. At least some
1110         /// paths have irrevocably committed to the HTLC and retrying the payment in full would result
1111         /// in over-/re-payment.
1112         ///
1113         /// The results here are ordered the same as the paths in the route object which was passed to
1114         /// send_payment, and any Errs which are not APIError::MonitorUpdateFailed can be safely
1115         /// retried (though there is currently no API with which to do so).
1116         ///
1117         /// Any entries which contain Err(APIError::MonitorUpdateFailed) or Ok(()) MUST NOT be retried
1118         /// as they will result in over-/re-payment. These HTLCs all either successfully sent (in the
1119         /// case of Ok(())) or will send once channel_monitor_updated is called on the next-hop channel
1120         /// with the latest update_id.
1121         PartialFailure {
1122                 /// The errors themselves, in the same order as the route hops.
1123                 results: Vec<Result<(), APIError>>,
1124                 /// If some paths failed without irrevocably committing to the new HTLC(s), this will
1125                 /// contain a [`RouteParameters`] object which can be used to calculate a new route that
1126                 /// will pay all remaining unpaid balance.
1127                 failed_paths_retry: Option<RouteParameters>,
1128                 /// The payment id for the payment, which is now at least partially pending.
1129                 payment_id: PaymentId,
1130         },
1131 }
1132
1133 /// Route hints used in constructing invoices for [phantom node payments].
1134 ///
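/// A small, hedged sketch of reading these fields when assembling a phantom invoice's route
/// hints (how the `hints` value is obtained here is an assumption, not part of this struct):
///
/// ```ignore
/// for chan in hints.channels.iter() {
///     // Each real channel contributes its inbound-payment SCID as the first hint hop; the
///     // final hop uses `hints.phantom_scid` to reach the node behind `real_node_pubkey`.
///     let _first_hop_scid = chan.get_inbound_payment_scid();
/// }
/// ```
///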
1135 /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
1136 #[derive(Clone)]
1137 pub struct PhantomRouteHints {
1138         /// The list of channels to be included in the invoice route hints.
1139         pub channels: Vec<ChannelDetails>,
1140         /// A fake scid used for representing the phantom node's fake channel in generating the invoice
1141         /// route hints.
1142         pub phantom_scid: u64,
1143         /// The pubkey of the real backing node that would ultimately receive the payment.
1144         pub real_node_pubkey: PublicKey,
1145 }
1146
1147 macro_rules! handle_error {
1148         ($self: ident, $internal: expr, $counterparty_node_id: expr) => {
1149                 match $internal {
1150                         Ok(msg) => Ok(msg),
1151                         Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
1152                                 #[cfg(debug_assertions)]
1153                                 {
1154                                         // In testing, ensure there are no deadlocks where the lock is already held upon
1155                                         // entering the macro.
1156                                         assert!($self.channel_state.try_lock().is_ok());
1157                                         assert!($self.pending_events.try_lock().is_ok());
1158                                 }
1159
1160                                 let mut msg_events = Vec::with_capacity(2);
1161
1162                                 if let Some((shutdown_res, update_option)) = shutdown_finish {
1163                                         $self.finish_force_close_channel(shutdown_res);
1164                                         if let Some(update) = update_option {
1165                                                 msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
1166                                                         msg: update
1167                                                 });
1168                                         }
1169                                         if let Some((channel_id, user_channel_id)) = chan_id {
1170                                                 $self.pending_events.lock().unwrap().push(events::Event::ChannelClosed {
1171                                                         channel_id, user_channel_id,
1172                                                         reason: ClosureReason::ProcessingError { err: err.err.clone() }
1173                                                 });
1174                                         }
1175                                 }
1176
1177                                 log_error!($self.logger, "{}", err.err);
1178                                 if let msgs::ErrorAction::IgnoreError = err.action {
1179                                 } else {
1180                                         msg_events.push(events::MessageSendEvent::HandleError {
1181                                                 node_id: $counterparty_node_id,
1182                                                 action: err.action.clone()
1183                                         });
1184                                 }
1185
1186                                 if !msg_events.is_empty() {
1187                                         $self.channel_state.lock().unwrap().pending_msg_events.append(&mut msg_events);
1188                                 }
1189
1190                                 // Return the error in case a higher-level API needs one
1191                                 Err(err)
1192                         },
1193                 }
1194         }
1195 }
1196
1197 macro_rules! update_maps_on_chan_removal {
1198         ($self: expr, $short_to_id: expr, $channel: expr) => {
1199                 if let Some(short_id) = $channel.get_short_channel_id() {
1200                         $short_to_id.remove(&short_id);
1201                 } else {
1202                         // If the channel was never confirmed on-chain prior to its closure, remove the
1203                         // outbound SCID alias we used for it from the collision-prevention set. While we
1204                         // generally want to avoid ever re-using an outbound SCID alias across all channels, we
1205                         // also don't want a counterparty to be able to trivially cause a memory leak by simply
1206                         // opening a million channels with us which are closed before we ever reach the funding
1207                         // stage.
1208                         let alias_removed = $self.outbound_scid_aliases.lock().unwrap().remove(&$channel.outbound_scid_alias());
1209                         debug_assert!(alias_removed);
1210                 }
1211                 $short_to_id.remove(&$channel.outbound_scid_alias());
1212         }
1213 }
1214
1215 /// Returns (boolean indicating if we should remove the Channel object from memory, a mapped error)
1216 macro_rules! convert_chan_err {
1217         ($self: ident, $err: expr, $short_to_id: expr, $channel: expr, $channel_id: expr) => {
1218                 match $err {
1219                         ChannelError::Warn(msg) => {
1220                                 (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Warn(msg), $channel_id.clone()))
1221                         },
1222                         ChannelError::Ignore(msg) => {
1223                                 (false, MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore(msg), $channel_id.clone()))
1224                         },
1225                         ChannelError::Close(msg) => {
1226                                 log_error!($self.logger, "Closing channel {} due to close-required error: {}", log_bytes!($channel_id[..]), msg);
1227                                 update_maps_on_chan_removal!($self, $short_to_id, $channel);
1228                                 let shutdown_res = $channel.force_shutdown(true);
1229                                 (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
1230                                         shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
1231                         },
1232                         ChannelError::CloseDelayBroadcast(msg) => {
1233                                 log_error!($self.logger, "Channel {} needs to be shut down, but closing transactions were not broadcast due to {}", log_bytes!($channel_id[..]), msg);
1234                                 update_maps_on_chan_removal!($self, $short_to_id, $channel);
1235                                 let shutdown_res = $channel.force_shutdown(false);
1236                                 (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.get_user_id(),
1237                                         shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
1238                         }
1239                 }
1240         }
1241 }
1242
1243 macro_rules! break_chan_entry {
1244         ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
1245                 match $res {
1246                         Ok(res) => res,
1247                         Err(e) => {
1248                                 let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
1249                                 if drop {
1250                                         $entry.remove_entry();
1251                                 }
1252                                 break Err(res);
1253                         }
1254                 }
1255         }
1256 }
1257
1258 macro_rules! try_chan_entry {
1259         ($self: ident, $res: expr, $channel_state: expr, $entry: expr) => {
1260                 match $res {
1261                         Ok(res) => res,
1262                         Err(e) => {
1263                                 let (drop, res) = convert_chan_err!($self, e, $channel_state.short_to_id, $entry.get_mut(), $entry.key());
1264                                 if drop {
1265                                         $entry.remove_entry();
1266                                 }
1267                                 return Err(res);
1268                         }
1269                 }
1270         }
1271 }
1272
1273 macro_rules! remove_channel {
1274         ($self: expr, $channel_state: expr, $entry: expr) => {
1275                 {
1276                         let channel = $entry.remove_entry().1;
1277                         update_maps_on_chan_removal!($self, $channel_state.short_to_id, channel);
1278                         channel
1279                 }
1280         }
1281 }
1282
1283 macro_rules! handle_monitor_err {
1284         ($self: ident, $err: expr, $short_to_id: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
1285                 match $err {
1286                         ChannelMonitorUpdateErr::PermanentFailure => {
1287                                 log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateErr::PermanentFailure", log_bytes!($chan_id[..]));
1288                                 update_maps_on_chan_removal!($self, $short_to_id, $chan);
1289                                 // TODO: $failed_fails is dropped here, which will cause other channels to hit the
1290                                 // chain in a confused state! We need to move them into the ChannelMonitor which
1291                                 // will be responsible for failing backwards once things confirm on-chain.
1292                                 // It's ok that we drop $failed_forwards here - at this point we'd rather they
1293                                 // broadcast HTLC-Timeout and pay the associated fees to get their funds back than
1294                                 // have us bother trying to claim it just to forward it on to another peer. If we're
1295                                 // splitting hairs we'd prefer to claim payments that were to us, but we haven't
1296                                 // given up the preimage yet, so might as well just wait until the payment is
1297                                 // retried, avoiding the on-chain fees.
1298                                 let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(),
1299                                                 $chan.force_shutdown(true), $self.get_channel_update_for_broadcast(&$chan).ok() ));
1300                                 (res, true)
1301                         },
1302                         ChannelMonitorUpdateErr::TemporaryFailure => {
1303                                 log_info!($self.logger, "Disabling channel {} due to monitor update TemporaryFailure. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
1304                                                 log_bytes!($chan_id[..]),
1305                                                 if $resend_commitment && $resend_raa {
1306                                                                 match $action_type {
1307                                                                         RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
1308                                                                         RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" },
1309                                                                 }
1310                                                         } else if $resend_commitment { "commitment" }
1311                                                         else if $resend_raa { "RAA" }
1312                                                         else { "nothing" },
1313                                                 (&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
1314                                                 (&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len(),
1315                                                 (&$failed_finalized_fulfills as &Vec<HTLCSource>).len());
1316                                 if !$resend_commitment {
1317                                         debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
1318                                 }
1319                                 if !$resend_raa {
1320                                         debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
1321                                 }
1322                                 $chan.monitor_update_failed($resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
1323                                 (Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
1324                         },
1325                 }
1326         };
1327         ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
1328                 let (res, drop) = handle_monitor_err!($self, $err, $channel_state.short_to_id, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
1329                 if drop {
1330                         $entry.remove_entry();
1331                 }
1332                 res
1333         } };
1334         ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
1335                 debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
1336                 handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, true, Vec::new(), Vec::new(), Vec::new(), $chan_id)
1337         } };
1338         ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
1339                 handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
1340         };
1341         ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
1342                 handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, Vec::new(), Vec::new(), Vec::new())
1343         };
1344         ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
1345                 handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails, Vec::new())
1346         };
1347 }
1348
1349 macro_rules! return_monitor_err {
1350         ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
1351                 return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment);
1352         };
1353         ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
1354                 return handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment, $failed_forwards, $failed_fails);
1355         }
1356 }
1357
1358 // Does not break in case of TemporaryFailure!
1359 macro_rules! maybe_break_monitor_err {
1360         ($self: ident, $err: expr, $channel_state: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
1361                 match (handle_monitor_err!($self, $err, $channel_state, $entry, $action_type, $resend_raa, $resend_commitment), $err) {
1362                         (e, ChannelMonitorUpdateErr::PermanentFailure) => {
1363                                 break e;
1364                         },
1365                         (_, ChannelMonitorUpdateErr::TemporaryFailure) => { },
1366                 }
1367         }
1368 }
1369
1370 macro_rules! send_funding_locked {
1371         ($short_to_id: expr, $pending_msg_events: expr, $channel: expr, $funding_locked_msg: expr) => {
1372                 $pending_msg_events.push(events::MessageSendEvent::SendFundingLocked {
1373                         node_id: $channel.get_counterparty_node_id(),
1374                         msg: $funding_locked_msg,
1375                 });
1376                 // Note that we may send a funding locked multiple times for a channel if we reconnect, so
1377                 // we allow collisions, but we shouldn't ever be updating the channel ID pointed to.
1378                 let outbound_alias_insert = $short_to_id.insert($channel.outbound_scid_alias(), $channel.channel_id());
1379                 assert!(outbound_alias_insert.is_none() || outbound_alias_insert.unwrap() == $channel.channel_id(),
1380                         "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
1381                 if let Some(real_scid) = $channel.get_short_channel_id() {
1382                         let scid_insert = $short_to_id.insert(real_scid, $channel.channel_id());
1383                         assert!(scid_insert.is_none() || scid_insert.unwrap() == $channel.channel_id(),
1384                                 "SCIDs should never collide - ensure you weren't behind the chain tip by a full month when creating channels");
1385                 }
1386         }
1387 }
1388
1389 macro_rules! handle_chan_restoration_locked {
1390         ($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
1391          $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
1392          $pending_forwards: expr, $funding_broadcastable: expr, $funding_locked: expr, $announcement_sigs: expr) => { {
1393                 let mut htlc_forwards = None;
1394
1395                 let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
1396                 let chanmon_update_is_none = chanmon_update.is_none();
1397                 let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
1398                 let res = loop {
1399                         let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
1400                         if !forwards.is_empty() {
1401                                 htlc_forwards = Some(($channel_entry.get().get_short_channel_id().expect("We can't have pending forwards before funding confirmation"),
1402                                         $channel_entry.get().get_funding_txo().unwrap(), forwards));
1403                         }
1404
1405                         if chanmon_update.is_some() {
1406                                 // On reconnect, we, by definition, only resend a funding_locked if there have been
1407                                 // no commitment updates, so the only channel monitor update which could also be
1408                                 // associated with a funding_locked would be the funding_created/funding_signed
1409                                 // monitor update. That monitor update failing implies that we won't send
1410                                 // funding_locked until it's been updated, so we can't have a funding_locked and a
1411                                 // monitor update here (so we don't bother to handle it correctly below).
1412                                 assert!($funding_locked.is_none());
1413                                 // A channel monitor update makes no sense without either a funding_locked or a
1414                                 // commitment update to process after it. Since we can't have a funding_locked, we
1415                                 // only bother to handle the monitor-update + commitment_update case below.
1416                                 assert!($commitment_update.is_some());
1417                         }
1418
1419                         if let Some(msg) = $funding_locked {
1420                                 // Similar to the above, this implies that we're letting the funding_locked fly
1421                                 // before it should be allowed to.
1422                                 assert!(chanmon_update.is_none());
1423                                 send_funding_locked!($channel_state.short_to_id, $channel_state.pending_msg_events, $channel_entry.get(), msg);
1424                         }
1425                         if let Some(msg) = $announcement_sigs {
1426                                 $channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
1427                                         node_id: counterparty_node_id,
1428                                         msg,
1429                                 });
1430                         }
1431
1432                         let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
1433                         if let Some(monitor_update) = chanmon_update {
1434                                 // We only ever broadcast a funding transaction in response to a funding_signed
1435                                 // message and the resulting monitor update. Thus, on channel_reestablish
1436                                 // message handling we can't have a funding transaction to broadcast. When
1437                                 // processing a monitor update finishing resulting in a funding broadcast, we
1438                                 // cannot have a second monitor update, thus this case would indicate a bug.
1439                                 assert!(funding_broadcastable.is_none());
1440                                 // Given we were just reconnected or finished updating a channel monitor, the
1441                                 // only case where we can get a new ChannelMonitorUpdate would be if we also
1442                                 // have some commitment updates to send as well.
1443                                 assert!($commitment_update.is_some());
1444                                 if let Err(e) = $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
1445                                         // channel_reestablish doesn't guarantee that the order of the messages it
1446                                         // returns is sensible, but if we're setting what messages to re-transmit on
1447                                         // monitor update success, we need to make sure the order is sane.
1448                                         let mut order = $order;
1449                                         if $raa.is_none() {
1450                                                 order = RAACommitmentOrder::CommitmentFirst;
1451                                         }
1452                                         break handle_monitor_err!($self, e, $channel_state, $channel_entry, order, $raa.is_some(), true);
1453                                 }
1454                         }
1455
1456                         macro_rules! handle_cs { () => {
1457                                 if let Some(update) = $commitment_update {
1458                                         $channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
1459                                                 node_id: counterparty_node_id,
1460                                                 updates: update,
1461                                         });
1462                                 }
1463                         } }
1464                         macro_rules! handle_raa { () => {
1465                                 if let Some(revoke_and_ack) = $raa {
1466                                         $channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
1467                                                 node_id: counterparty_node_id,
1468                                                 msg: revoke_and_ack,
1469                                         });
1470                                 }
1471                         } }
1472                         match $order {
1473                                 RAACommitmentOrder::CommitmentFirst => {
1474                                         handle_cs!();
1475                                         handle_raa!();
1476                                 },
1477                                 RAACommitmentOrder::RevokeAndACKFirst => {
1478                                         handle_raa!();
1479                                         handle_cs!();
1480                                 },
1481                         }
1482                         if let Some(tx) = funding_broadcastable {
1483                                 log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
1484                                 $self.tx_broadcaster.broadcast_transaction(&tx);
1485                         }
1486                         break Ok(());
1487                 };
1488
1489                 if chanmon_update_is_none {
1490                         // If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
1491                         // above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
1492                         // should *never* end up calling back to `chain_monitor.update_channel()`.
1493                         assert!(res.is_ok());
1494                 }
1495
1496                 (htlc_forwards, res, counterparty_node_id)
1497         } }
1498 }
1499
1500 macro_rules! post_handle_chan_restoration {
1501         ($self: ident, $locked_res: expr) => { {
1502                 let (htlc_forwards, res, counterparty_node_id) = $locked_res;
1503
1504                 let _ = handle_error!($self, res, counterparty_node_id);
1505
1506                 if let Some(forwards) = htlc_forwards {
1507                         $self.forward_htlcs(&mut [forwards][..]);
1508                 }
1509         } }
1510 }
1511
1512 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
1513         where M::Target: chain::Watch<Signer>,
1514         T::Target: BroadcasterInterface,
1515         K::Target: KeysInterface<Signer = Signer>,
1516         F::Target: FeeEstimator,
1517         L::Target: Logger,
1518 {
1519         /// Constructs a new ChannelManager to hold several channels and route between them.
1520         ///
1521         /// This is the main "logic hub" for all channel-related actions, and implements
1522         /// ChannelMessageHandler.
1523         ///
1524         /// Non-proportional fees are fixed according to our risk using the provided fee estimator.
1525         ///
1526         /// Users need to notify the new ChannelManager when a new block is connected or
1527         /// disconnected using its `block_connected` and `block_disconnected` methods, starting
1528         /// from after `params.best_block.block_hash()`.
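        ///
        /// A hedged construction sketch; the `fee_estimator`, `chain_monitor`, `broadcaster`,
        /// `logger`, and `keys_manager` values are assumed to have been built elsewhere, and
        /// `BestBlock::from_genesis` is only appropriate when syncing from scratch:
        ///
        /// ```ignore
        /// let params = ChainParameters {
        ///     network: Network::Bitcoin,
        ///     best_block: BestBlock::from_genesis(Network::Bitcoin),
        /// };
        /// let channel_manager = ChannelManager::new(
        ///     fee_estimator, chain_monitor, broadcaster, logger, keys_manager,
        ///     UserConfig::default(), params);
        /// ```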
1529         pub fn new(fee_est: F, chain_monitor: M, tx_broadcaster: T, logger: L, keys_manager: K, config: UserConfig, params: ChainParameters) -> Self {
1530                 let mut secp_ctx = Secp256k1::new();
1531                 secp_ctx.seeded_randomize(&keys_manager.get_secure_random_bytes());
1532                 let inbound_pmt_key_material = keys_manager.get_inbound_payment_key_material();
1533                 let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
1534                 ChannelManager {
1535                         default_configuration: config.clone(),
1536                         genesis_hash: genesis_block(params.network).header.block_hash(),
1537                         fee_estimator: fee_est,
1538                         chain_monitor,
1539                         tx_broadcaster,
1540
1541                         best_block: RwLock::new(params.best_block),
1542
1543                         channel_state: Mutex::new(ChannelHolder{
1544                                 by_id: HashMap::new(),
1545                                 short_to_id: HashMap::new(),
1546                                 forward_htlcs: HashMap::new(),
1547                                 claimable_htlcs: HashMap::new(),
1548                                 pending_msg_events: Vec::new(),
1549                         }),
1550                         outbound_scid_aliases: Mutex::new(HashSet::new()),
1551                         pending_inbound_payments: Mutex::new(HashMap::new()),
1552                         pending_outbound_payments: Mutex::new(HashMap::new()),
1553
1554                         our_network_key: keys_manager.get_node_secret(Recipient::Node).unwrap(),
1555                         our_network_pubkey: PublicKey::from_secret_key(&secp_ctx, &keys_manager.get_node_secret(Recipient::Node).unwrap()),
1556                         secp_ctx,
1557
1558                         inbound_payment_key: expanded_inbound_key,
1559                         fake_scid_rand_bytes: keys_manager.get_secure_random_bytes(),
1560
1561                         last_node_announcement_serial: AtomicUsize::new(0),
1562                         highest_seen_timestamp: AtomicUsize::new(0),
1563
1564                         per_peer_state: RwLock::new(HashMap::new()),
1565
1566                         pending_events: Mutex::new(Vec::new()),
1567                         pending_background_events: Mutex::new(Vec::new()),
1568                         total_consistency_lock: RwLock::new(()),
1569                         persistence_notifier: PersistenceNotifier::new(),
1570
1571                         keys_manager,
1572
1573                         logger,
1574                 }
1575         }
1576
1577         /// Gets the current default configuration applied to all new channels.
1578         pub fn get_current_default_configuration(&self) -> &UserConfig {
1579                 &self.default_configuration
1580         }
1581
1582         fn create_and_insert_outbound_scid_alias(&self) -> u64 {
1583                 let height = self.best_block.read().unwrap().height();
1584                 let mut outbound_scid_alias = 0;
1585                 let mut i = 0;
1586                 loop {
1587                         if cfg!(fuzzing) { // fuzzing chacha20 doesn't use the key at all so we always get the same alias
1588                                 outbound_scid_alias += 1;
1589                         } else {
1590                                 outbound_scid_alias = fake_scid::Namespace::OutboundAlias.get_fake_scid(height, &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
1591                         }
1592                         if outbound_scid_alias != 0 && self.outbound_scid_aliases.lock().unwrap().insert(outbound_scid_alias) {
1593                                 break;
1594                         }
1595                         i += 1;
1596                         if i > 1_000_000 { panic!("Your RNG is busted or we ran out of possible outbound SCID aliases (which should never happen before we run out of memory to store channels)"); }
1597                 }
1598                 outbound_scid_alias
1599         }
1600
1601         /// Creates a new outbound channel to the given remote node and with the given value.
1602         ///
1603         /// `user_channel_id` will be provided back as in
1604         /// [`Event::FundingGenerationReady::user_channel_id`] to allow tracking of which events
1605         /// correspond with which `create_channel` call. Note that the `user_channel_id` defaults to 0
1606         /// for inbound channels, so you may wish to avoid using 0 for `user_channel_id` here.
1607         /// `user_channel_id` has no meaning inside of LDK, it is simply copied to events and otherwise
1608         /// ignored.
1609         ///
1610         /// Raises [`APIError::APIMisuseError`] when `channel_value_satoshis` > 2**24 or `push_msat` is
1611         /// greater than `channel_value_satoshis * 1k` or `channel_value_satoshis < 1000`.
1612         ///
1613         /// Note that we do not check if you are currently connected to the given peer. If no
1614         /// connection is available, the outbound `open_channel` message may fail to send, resulting in
1615         /// the channel eventually being silently forgotten (dropped on reload).
1616         ///
1617         /// Returns the new Channel's temporary `channel_id`. This ID will appear as
1618         /// [`Event::FundingGenerationReady::temporary_channel_id`] and in
1619         /// [`ChannelDetails::channel_id`] until after
1620         /// [`ChannelManager::funding_transaction_generated`] is called, swapping the Channel's ID for
1621         /// one derived from the funding transaction's TXID. If the counterparty rejects the channel
1622         /// immediately, this temporary ID will appear in [`Event::ChannelClosed::channel_id`].
1623         ///
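        /// A minimal, hedged usage sketch (assumes an initialized `channel_manager` and a peer we
        /// are already connected to; the values are illustrative only):
        ///
        /// ```ignore
        /// // Open a 100,000 sat channel, pushing no funds, tagged with user_channel_id 42.
        /// let temporary_channel_id =
        ///     channel_manager.create_channel(their_pubkey, 100_000, 0, 42, None)?;
        /// ```
        ///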
1624         /// [`Event::FundingGenerationReady::user_channel_id`]: events::Event::FundingGenerationReady::user_channel_id
1625         /// [`Event::FundingGenerationReady::temporary_channel_id`]: events::Event::FundingGenerationReady::temporary_channel_id
1626         /// [`Event::ChannelClosed::channel_id`]: events::Event::ChannelClosed::channel_id
1627         pub fn create_channel(&self, their_network_key: PublicKey, channel_value_satoshis: u64, push_msat: u64, user_channel_id: u64, override_config: Option<UserConfig>) -> Result<[u8; 32], APIError> {
1628                 if channel_value_satoshis < 1000 {
1629                         return Err(APIError::APIMisuseError { err: format!("Channel value must be at least 1000 satoshis. It was {}", channel_value_satoshis) });
1630                 }
1631
1632                 let channel = {
1633                         let per_peer_state = self.per_peer_state.read().unwrap();
1634                         match per_peer_state.get(&their_network_key) {
1635                                 Some(peer_state) => {
1636                                         let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
1637                                         let peer_state = peer_state.lock().unwrap();
1638                                         let their_features = &peer_state.latest_features;
1639                                         let config = if override_config.is_some() { override_config.as_ref().unwrap() } else { &self.default_configuration };
1640                                         match Channel::new_outbound(&self.fee_estimator, &self.keys_manager, their_network_key,
1641                                                 their_features, channel_value_satoshis, push_msat, user_channel_id, config,
1642                                                 self.best_block.read().unwrap().height(), outbound_scid_alias)
1643                                         {
1644                                                 Ok(res) => res,
1645                                                 Err(e) => {
1646                                                         self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
1647                                                         return Err(e);
1648                                                 },
1649                                         }
1650                                 },
1651                                 None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", their_network_key) }),
1652                         }
1653                 };
1654                 let res = channel.get_open_channel(self.genesis_hash.clone());
1655
1656                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
1657                 // We want to make sure the lock is actually acquired by PersistenceNotifierGuard.
1658                 debug_assert!(&self.total_consistency_lock.try_write().is_err());
1659
1660                 let temporary_channel_id = channel.channel_id();
1661                 let mut channel_state = self.channel_state.lock().unwrap();
1662                 match channel_state.by_id.entry(temporary_channel_id) {
1663                         hash_map::Entry::Occupied(_) => {
1664                                 if cfg!(fuzzing) {
1665                                         return Err(APIError::APIMisuseError { err: "Fuzzy bad RNG".to_owned() });
1666                                 } else {
1667                                         panic!("RNG is bad???");
1668                                 }
1669                         },
1670                         hash_map::Entry::Vacant(entry) => { entry.insert(channel); }
1671                 }
1672                 channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
1673                         node_id: their_network_key,
1674                         msg: res,
1675                 });
1676                 Ok(temporary_channel_id)
1677         }
1678
1679         fn list_channels_with_filter<Fn: FnMut(&(&[u8; 32], &Channel<Signer>)) -> bool>(&self, f: Fn) -> Vec<ChannelDetails> {
1680                 let mut res = Vec::new();
1681                 {
1682                         let channel_state = self.channel_state.lock().unwrap();
1683                         res.reserve(channel_state.by_id.len());
1684                         for (channel_id, channel) in channel_state.by_id.iter().filter(f) {
1685                                 let balance = channel.get_available_balances();
1686                                 let (to_remote_reserve_satoshis, to_self_reserve_satoshis) =
1687                                         channel.get_holder_counterparty_selected_channel_reserve_satoshis();
1688                                 res.push(ChannelDetails {
1689                                         channel_id: (*channel_id).clone(),
1690                                         counterparty: ChannelCounterparty {
1691                                                 node_id: channel.get_counterparty_node_id(),
1692                                                 features: InitFeatures::empty(),
1693                                                 unspendable_punishment_reserve: to_remote_reserve_satoshis,
1694                                                 forwarding_info: channel.counterparty_forwarding_info(),
1695                                                 // Ensures that we have actually received the `htlc_minimum_msat` value
1696                                                 // from the counterparty through the `OpenChannel` or `AcceptChannel`
1697                                                 // message (as they are always the first message from the counterparty).
1698                                                 // Else `Channel::get_counterparty_htlc_minimum_msat` could return the
1699                                                 // default `0` value set by `Channel::new_outbound`.
1700                                                 outbound_htlc_minimum_msat: if channel.have_received_message() {
1701                                                         Some(channel.get_counterparty_htlc_minimum_msat()) } else { None },
1702                                                 outbound_htlc_maximum_msat: channel.get_counterparty_htlc_maximum_msat(),
1703                                         },
1704                                         funding_txo: channel.get_funding_txo(),
1705                                         // Note that accept_channel (or open_channel) is always the first message, so
1706                                         // `have_received_message` indicates that type negotiation has completed.
1707                                         channel_type: if channel.have_received_message() { Some(channel.get_channel_type().clone()) } else { None },
1708                                         short_channel_id: channel.get_short_channel_id(),
1709                                         inbound_scid_alias: channel.latest_inbound_scid_alias(),
1710                                         channel_value_satoshis: channel.get_value_satoshis(),
1711                                         unspendable_punishment_reserve: to_self_reserve_satoshis,
1712                                         balance_msat: balance.balance_msat,
1713                                         inbound_capacity_msat: balance.inbound_capacity_msat,
1714                                         outbound_capacity_msat: balance.outbound_capacity_msat,
1715                                         next_outbound_htlc_limit_msat: balance.next_outbound_htlc_limit_msat,
1716                                         user_channel_id: channel.get_user_id(),
1717                                         confirmations_required: channel.minimum_depth(),
1718                                         force_close_spend_delay: channel.get_counterparty_selected_contest_delay(),
1719                                         is_outbound: channel.is_outbound(),
1720                                         is_funding_locked: channel.is_usable(),
1721                                         is_usable: channel.is_live(),
1722                                         is_public: channel.should_announce(),
1723                                         inbound_htlc_minimum_msat: Some(channel.get_holder_htlc_minimum_msat()),
1724                                         inbound_htlc_maximum_msat: channel.get_holder_htlc_maximum_msat()
1725                                 });
1726                         }
1727                 }
1728                 let per_peer_state = self.per_peer_state.read().unwrap();
1729                 for chan in res.iter_mut() {
1730                         if let Some(peer_state) = per_peer_state.get(&chan.counterparty.node_id) {
1731                                 chan.counterparty.features = peer_state.lock().unwrap().latest_features.clone();
1732                         }
1733                 }
1734                 res
1735         }
1736
1737         /// Gets the list of open channels, in random order. See ChannelDetails field documentation for
1738         /// more information.
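        ///
        /// A hedged example (assumes an initialized `channel_manager`); when selecting channels for
        /// routing you likely want `list_usable_channels` instead:
        ///
        /// ```ignore
        /// let open_channel_count = channel_manager.list_channels().len();
        /// ```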
1739         pub fn list_channels(&self) -> Vec<ChannelDetails> {
1740                 self.list_channels_with_filter(|_| true)
1741         }
1742
1743         /// Gets the list of usable channels, in random order. Useful as an argument to
1744         /// get_route to ensure non-announced channels are used.
1745         ///
1746         /// These are guaranteed to have their [`ChannelDetails::is_usable`] value set to true, see the
1747         /// documentation for [`ChannelDetails::is_usable`] for more info on exactly what the criteria
1748         /// are.
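        ///
        /// A small, hedged example (assumes an initialized `channel_manager`):
        ///
        /// ```ignore
        /// // Every channel returned here is ready to carry new outbound HTLCs.
        /// let usable = channel_manager.list_usable_channels();
        /// assert!(usable.iter().all(|chan| chan.is_usable));
        /// ```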
1749         pub fn list_usable_channels(&self) -> Vec<ChannelDetails> {
1750                 // Note we use is_live here instead of usable which leads to somewhat confused
1751                 // internal/external nomenclature, but that's ok because that's probably what the user
1752                 // really wanted anyway.
1753                 self.list_channels_with_filter(|&(_, ref channel)| channel.is_live())
1754         }
1755
1756         /// Helper function that issues the channel close events
1757         fn issue_channel_close_events(&self, channel: &Channel<Signer>, closure_reason: ClosureReason) {
1758                 let mut pending_events_lock = self.pending_events.lock().unwrap();
1759                 match channel.unbroadcasted_funding() {
1760                         Some(transaction) => {
1761                                 pending_events_lock.push(events::Event::DiscardFunding { channel_id: channel.channel_id(), transaction })
1762                         },
1763                         None => {},
1764                 }
1765                 pending_events_lock.push(events::Event::ChannelClosed {
1766                         channel_id: channel.channel_id(),
1767                         user_channel_id: channel.get_user_id(),
1768                         reason: closure_reason
1769                 });
1770         }
1771
1772         fn close_channel_internal(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option<u32>) -> Result<(), APIError> {
1773                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
1774
1775                 let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
1776                 let result: Result<(), _> = loop {
1777                         let mut channel_state_lock = self.channel_state.lock().unwrap();
1778                         let channel_state = &mut *channel_state_lock;
1779                         match channel_state.by_id.entry(channel_id.clone()) {
1780                                 hash_map::Entry::Occupied(mut chan_entry) => {
1781                                         if *counterparty_node_id != chan_entry.get().get_counterparty_node_id() {
1782                                                 return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
1783                                         }
1784                                         let per_peer_state = self.per_peer_state.read().unwrap();
1785                                         let (shutdown_msg, monitor_update, htlcs) = match per_peer_state.get(&counterparty_node_id) {
1786                                                 Some(peer_state) => {
1787                                                         let peer_state = peer_state.lock().unwrap();
1788                                                         let their_features = &peer_state.latest_features;
1789                                                         chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
1790                                                 },
1791                                                 None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
1792                                         };
1793                                         failed_htlcs = htlcs;
1794
1795                                         // Update the monitor with the shutdown script if necessary.
1796                                         if let Some(monitor_update) = monitor_update {
1797                                                 if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
1798                                                         let (result, is_permanent) =
1799                                                                 handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
1800                                                         if is_permanent {
1801                                                                 remove_channel!(self, channel_state, chan_entry);
1802                                                                 break result;
1803                                                         }
1804                                                 }
1805                                         }
1806
1807                                         channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
1808                                                 node_id: *counterparty_node_id,
1809                                                 msg: shutdown_msg
1810                                         });
1811
1812                                         if chan_entry.get().is_shutdown() {
1813                                                 let channel = remove_channel!(self, channel_state, chan_entry);
1814                                                 if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
1815                                                         channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
1816                                                                 msg: channel_update
1817                                                         });
1818                                                 }
1819                                                 self.issue_channel_close_events(&channel, ClosureReason::HolderForceClosed);
1820                                         }
1821                                         break Ok(());
1822                                 },
1823                                 hash_map::Entry::Vacant(_) => return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()})
1824                         }
1825                 };
1826
1827                 for htlc_source in failed_htlcs.drain(..) {
1828                         self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
1829                 }
1830
1831                 let _ = handle_error!(self, result, *counterparty_node_id);
1832                 Ok(())
1833         }
1834
1835         /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
1836         /// will be accepted on the given channel, and after additional timeout/the closing of all
1837         /// pending HTLCs, the channel will be closed on chain.
1838         ///
1839         ///  * If we are the channel initiator, we will pay between our [`Background`] and
1840         ///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee
1841         ///    estimate.
1842         ///  * If our counterparty is the channel initiator, we will require a channel closing
1843         ///    transaction feerate of at least our [`Background`] feerate or the feerate which
1844         ///    would appear on a force-closure transaction, whichever is lower. We will allow our
1845         ///    counterparty to pay as much fee as they'd like, however.
1846         ///
1847         /// May generate a SendShutdown message event on success, which should be relayed.
1848         ///
1849         /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
1850         /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
1851         /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
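        ///
        /// A minimal usage sketch, added for illustration only and not compiled as a doctest; it
        /// assumes a running `channel_manager` plus the `channel_id` and `counterparty_node_id` of
        /// an open channel are already in scope:
        ///
        /// ```ignore
        /// // Begin a cooperative close. The resulting SendShutdown message event still needs to
        /// // be delivered to the peer via the normal message-event processing loop.
        /// channel_manager.close_channel(&channel_id, &counterparty_node_id)?;
        /// ```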
1852         pub fn close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
1853                 self.close_channel_internal(channel_id, counterparty_node_id, None)
1854         }
1855
1856         /// Begins the process of closing a channel. After this call (plus some timeout), no new HTLCs
1857         /// will be accepted on the given channel, and after additional timeout/the closing of all
1858         /// pending HTLCs, the channel will be closed on chain.
1859         ///
1860         /// `target_feerate_sat_per_1000_weight` has different meanings depending on if we initiated
1861         /// the channel being closed or not:
1862         ///  * If we are the channel initiator, we will pay at least this feerate on the closing
1863         ///    transaction. The upper-bound is set by
1864         ///    [`ChannelConfig::force_close_avoidance_max_fee_satoshis`] plus our [`Normal`] fee
1865         ///    estimate (or `target_feerate_sat_per_1000_weight`, if it is greater).
1866         ///  * If our counterparty is the channel initiator, we will refuse to accept a channel closure
1867         ///    transaction feerate below `target_feerate_sat_per_1000_weight` (or the feerate which
1868         ///    will appear on a force-closure transaction, whichever is lower).
1869         ///
1870         /// May generate a SendShutdown message event on success, which should be relayed.
1871         ///
1872         /// [`ChannelConfig::force_close_avoidance_max_fee_satoshis`]: crate::util::config::ChannelConfig::force_close_avoidance_max_fee_satoshis
1873         /// [`Background`]: crate::chain::chaininterface::ConfirmationTarget::Background
1874         /// [`Normal`]: crate::chain::chaininterface::ConfirmationTarget::Normal
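        ///
        /// Illustrative sketch (not a doctest); `channel_manager` and an open channel's
        /// `channel_id`/`counterparty_node_id` are assumed to exist:
        ///
        /// ```ignore
        /// // Request a closing-transaction feerate of at least 1_000 sats per 1000 weight units
        /// // (roughly 4 sat/vbyte). If we are the channel initiator we may pay more, up to the
        /// // bound described above.
        /// channel_manager.close_channel_with_target_feerate(&channel_id, &counterparty_node_id, 1_000)?;
        /// ```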
1875         pub fn close_channel_with_target_feerate(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: u32) -> Result<(), APIError> {
1876                 self.close_channel_internal(channel_id, counterparty_node_id, Some(target_feerate_sats_per_1000_weight))
1877         }
1878
1879         #[inline]
1880         fn finish_force_close_channel(&self, shutdown_res: ShutdownResult) {
1881                 let (monitor_update_option, mut failed_htlcs) = shutdown_res;
1882                 log_debug!(self.logger, "Finishing force-closure of channel with {} HTLCs to fail", failed_htlcs.len());
1883                 for htlc_source in failed_htlcs.drain(..) {
1884                         self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
1885                 }
1886                 if let Some((funding_txo, monitor_update)) = monitor_update_option {
1887                         // There isn't anything we can do if we get an update failure - we're already
1888                         // force-closing. The monitor update on the required in-memory copy should broadcast
1889                         // the latest local state, which is the best we can do anyway. Thus, it is safe to
1890                         // ignore the result here.
1891                         let _ = self.chain_monitor.update_channel(funding_txo, monitor_update);
1892                 }
1893         }
1894
1895         /// `peer_msg` should be set when we receive a message from a peer, but left unset when the
1896         /// user initiates the close; when set, it is re-exposed to the user as the `ChannelClosed` event's closure reason.
1897         fn force_close_channel_with_peer(&self, channel_id: &[u8; 32], peer_node_id: &PublicKey, peer_msg: Option<&String>) -> Result<PublicKey, APIError> {
1898                 let mut chan = {
1899                         let mut channel_state_lock = self.channel_state.lock().unwrap();
1900                         let channel_state = &mut *channel_state_lock;
1901                         if let hash_map::Entry::Occupied(chan) = channel_state.by_id.entry(channel_id.clone()) {
1902                                 if chan.get().get_counterparty_node_id() != *peer_node_id {
1903                                         return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
1904                                 }
1905                                 if let Some(peer_msg) = peer_msg {
1906                                         self.issue_channel_close_events(chan.get(), ClosureReason::CounterpartyForceClosed { peer_msg: peer_msg.to_string() });
1907                                 } else {
1908                                         self.issue_channel_close_events(chan.get(), ClosureReason::HolderForceClosed);
1909                                 }
1910                                 remove_channel!(self, channel_state, chan)
1911                         } else {
1912                                 return Err(APIError::ChannelUnavailable{err: "No such channel".to_owned()});
1913                         }
1914                 };
1915                 log_error!(self.logger, "Force-closing channel {}", log_bytes!(channel_id[..]));
1916                 self.finish_force_close_channel(chan.force_shutdown(true));
1917                 if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
1918                         let mut channel_state = self.channel_state.lock().unwrap();
1919                         channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
1920                                 msg: update
1921                         });
1922                 }
1923
1924                 Ok(chan.get_counterparty_node_id())
1925         }
1926
1927         /// Force closes a channel, immediately broadcasting the latest local commitment transaction to
1928         /// the chain and rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
1929         /// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
1930         /// channel.
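        ///
        /// Sketch of a typical call (illustrative only; `channel_manager`, `channel_id` and
        /// `counterparty_node_id` are assumed to be available):
        ///
        /// ```ignore
        /// // Unilaterally close, broadcasting our latest local commitment transaction. The
        /// // funds it spends only become available again after the usual on-chain delays.
        /// channel_manager.force_close_channel(&channel_id, &counterparty_node_id)?;
        /// ```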
1931         pub fn force_close_channel(&self, channel_id: &[u8; 32], counterparty_node_id: &PublicKey) -> Result<(), APIError> {
1932                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
1933                 match self.force_close_channel_with_peer(channel_id, counterparty_node_id, None) {
1934                         Ok(counterparty_node_id) => {
1935                                 self.channel_state.lock().unwrap().pending_msg_events.push(
1936                                         events::MessageSendEvent::HandleError {
1937                                                 node_id: counterparty_node_id,
1938                                                 action: msgs::ErrorAction::SendErrorMessage {
1939                                                         msg: msgs::ErrorMessage { channel_id: *channel_id, data: "Channel force-closed".to_owned() }
1940                                                 },
1941                                         }
1942                                 );
1943                                 Ok(())
1944                         },
1945                         Err(e) => Err(e)
1946                 }
1947         }
1948
1949         /// Force close all channels, immediately broadcasting the latest local commitment transaction
1950         /// for each to the chain and rejecting new HTLCs on each.
1951         pub fn force_close_all_channels(&self) {
1952                 for chan in self.list_channels() {
1953                         let _ = self.force_close_channel(&chan.channel_id, &chan.counterparty.node_id);
1954                 }
1955         }
1956
1957         fn construct_recv_pending_htlc_info(&self, hop_data: msgs::OnionHopData, shared_secret: [u8; 32],
1958                 payment_hash: PaymentHash, amt_msat: u64, cltv_expiry: u32, phantom_shared_secret: Option<[u8; 32]>) -> Result<PendingHTLCInfo, ReceiveError>
1959         {
1960                 // final_incorrect_cltv_expiry
1961                 if hop_data.outgoing_cltv_value != cltv_expiry {
1962                         return Err(ReceiveError {
1963                                 msg: "Upstream node set CLTV to the wrong value",
1964                                 err_code: 18,
1965                                 err_data: byte_utils::be32_to_array(cltv_expiry).to_vec()
1966                         })
1967                 }
1968                 // final_expiry_too_soon
1969                 // We have to have some headroom to broadcast on chain if we have the preimage, so make sure
1970                 // we have at least HTLC_FAIL_BACK_BUFFER blocks to go.
1971                 // Also, ensure that, in the case of an unknown preimage for the received payment hash, our
1972                 // payment logic has enough time to fail the HTLC backward before our onchain logic triggers a
1973                 // channel closure (see HTLC_FAIL_BACK_BUFFER rationale).
1974                 if (hop_data.outgoing_cltv_value as u64) <= self.best_block.read().unwrap().height() as u64 + HTLC_FAIL_BACK_BUFFER as u64 + 1  {
1975                         return Err(ReceiveError {
1976                                 err_code: 17,
1977                                 err_data: Vec::new(),
1978                                 msg: "The final CLTV expiry is too soon to handle",
1979                         });
1980                 }
1981                 if hop_data.amt_to_forward > amt_msat {
1982                         return Err(ReceiveError {
1983                                 err_code: 19,
1984                                 err_data: byte_utils::be64_to_array(amt_msat).to_vec(),
1985                                 msg: "Upstream node sent less than we were supposed to receive in payment",
1986                         });
1987                 }
1988
1989                 let routing = match hop_data.format {
1990                         msgs::OnionHopDataFormat::Legacy { .. } => {
1991                                 return Err(ReceiveError {
1992                                         err_code: 0x4000|0x2000|3,
1993                                         err_data: Vec::new(),
1994                                         msg: "We require payment_secrets",
1995                                 });
1996                         },
1997                         msgs::OnionHopDataFormat::NonFinalNode { .. } => {
1998                                 return Err(ReceiveError {
1999                                         err_code: 0x4000|22,
2000                                         err_data: Vec::new(),
2001                                         msg: "Got non final data with an HMAC of 0",
2002                                 });
2003                         },
2004                         msgs::OnionHopDataFormat::FinalNode { payment_data, keysend_preimage } => {
2005                                 if payment_data.is_some() && keysend_preimage.is_some() {
2006                                         return Err(ReceiveError {
2007                                                 err_code: 0x4000|22,
2008                                                 err_data: Vec::new(),
2009                                                 msg: "We don't support MPP keysend payments",
2010                                         });
2011                                 } else if let Some(data) = payment_data {
2012                                         PendingHTLCRouting::Receive {
2013                                                 payment_data: data,
2014                                                 incoming_cltv_expiry: hop_data.outgoing_cltv_value,
2015                                                 phantom_shared_secret,
2016                                         }
2017                                 } else if let Some(payment_preimage) = keysend_preimage {
2018                                         // We need to check that the sender knows the keysend preimage before processing this
2019                                         // payment further. Otherwise, an intermediary routing hop forwarding a non-keysend HTLC X
2020                                         // could discover the final destination of X by probing the adjacent nodes on the route
2021                                         // with a keysend payment of identical payment hash to X and observing the processing
2022                                         // time discrepancies due to a hash collision with X.
2023                                         let hashed_preimage = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
2024                                         if hashed_preimage != payment_hash {
2025                                                 return Err(ReceiveError {
2026                                                         err_code: 0x4000|22,
2027                                                         err_data: Vec::new(),
2028                                                         msg: "Payment preimage didn't match payment hash",
2029                                                 });
2030                                         }
2031
2032                                         PendingHTLCRouting::ReceiveKeysend {
2033                                                 payment_preimage,
2034                                                 incoming_cltv_expiry: hop_data.outgoing_cltv_value,
2035                                         }
2036                                 } else {
2037                                         return Err(ReceiveError {
2038                                                 err_code: 0x4000|0x2000|3,
2039                                                 err_data: Vec::new(),
2040                                                 msg: "We require payment_secrets",
2041                                         });
2042                                 }
2043                         },
2044                 };
2045                 Ok(PendingHTLCInfo {
2046                         routing,
2047                         payment_hash,
2048                         incoming_shared_secret: shared_secret,
2049                         amt_to_forward: amt_msat,
2050                         outgoing_cltv_value: hop_data.outgoing_cltv_value,
2051                 })
2052         }
2053
2054         fn decode_update_add_htlc_onion(&self, msg: &msgs::UpdateAddHTLC) -> (PendingHTLCStatus, MutexGuard<ChannelHolder<Signer>>) {
2055                 macro_rules! return_malformed_err {
2056                         ($msg: expr, $err_code: expr) => {
2057                                 {
2058                                         log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
2059                                         return (PendingHTLCStatus::Fail(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
2060                                                 channel_id: msg.channel_id,
2061                                                 htlc_id: msg.htlc_id,
2062                                                 sha256_of_onion: Sha256::hash(&msg.onion_routing_packet.hop_data).into_inner(),
2063                                                 failure_code: $err_code,
2064                                         })), self.channel_state.lock().unwrap());
2065                                 }
2066                         }
2067                 }
2068
2069                 if let Err(_) = msg.onion_routing_packet.public_key {
2070                         return_malformed_err!("invalid ephemeral pubkey", 0x8000 | 0x4000 | 6);
2071                 }
2072
2073                 let shared_secret = SharedSecret::new(&msg.onion_routing_packet.public_key.unwrap(), &self.our_network_key).secret_bytes();
2074
2075                 if msg.onion_routing_packet.version != 0 {
2076                         //TODO: Spec doesn't indicate if we should only hash hop_data here (and in other
2077                         //sha256_of_onion error data packets), or the entire onion_routing_packet. Either way,
2078                         //the hash doesn't really serve any purpose - in the case of hashing all data, the
2079                         //receiving node would have to brute force to figure out which version was put in the
2080                         //packet by the node that sent us the message; in the case of hashing the hop_data, the
2081                         //node knows the HMAC matched, so they already know what is there...
2082                         return_malformed_err!("Unknown onion packet version", 0x8000 | 0x4000 | 4);
2083                 }
2084
2085                 let mut channel_state = None;
2086                 macro_rules! return_err {
2087                         ($msg: expr, $err_code: expr, $data: expr) => {
2088                                 {
2089                                         log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
2090                                         if channel_state.is_none() {
2091                                                 channel_state = Some(self.channel_state.lock().unwrap());
2092                                         }
2093                                         return (PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
2094                                                 channel_id: msg.channel_id,
2095                                                 htlc_id: msg.htlc_id,
2096                                                 reason: onion_utils::build_first_hop_failure_packet(&shared_secret, $err_code, $data),
2097                                         })), channel_state.unwrap());
2098                                 }
2099                         }
2100                 }
2101
2102                 let next_hop = match onion_utils::decode_next_hop(shared_secret, &msg.onion_routing_packet.hop_data[..], msg.onion_routing_packet.hmac, msg.payment_hash) {
2103                         Ok(res) => res,
2104                         Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
2105                                 return_malformed_err!(err_msg, err_code);
2106                         },
2107                         Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
2108                                 return_err!(err_msg, err_code, &[0; 0]);
2109                         },
2110                 };
2111
2112                 let pending_forward_info = match next_hop {
2113                         onion_utils::Hop::Receive(next_hop_data) => {
2114                                 // OUR PAYMENT!
2115                                 match self.construct_recv_pending_htlc_info(next_hop_data, shared_secret, msg.payment_hash, msg.amount_msat, msg.cltv_expiry, None) {
2116                                         Ok(info) => {
2117                                                 // Note that we could obviously respond immediately with an update_fulfill_htlc
2118                                                 // message, however that would leak that we are the recipient of this payment, so
2119                                                 // instead we stay symmetric with the forwarding case, only responding (after a
2120                                                 // delay) once they've sent us a commitment_signed!
2121                                                 PendingHTLCStatus::Forward(info)
2122                                         },
2123                                         Err(ReceiveError { err_code, err_data, msg }) => return_err!(msg, err_code, &err_data)
2124                                 }
2125                         },
2126                         onion_utils::Hop::Forward { next_hop_data, next_hop_hmac, new_packet_bytes } => {
2127                                 let mut new_pubkey = msg.onion_routing_packet.public_key.unwrap();
2128
2129                                 let blinding_factor = {
2130                                         let mut sha = Sha256::engine();
2131                                         sha.input(&new_pubkey.serialize()[..]);
2132                                         sha.input(&shared_secret);
2133                                         Sha256::from_engine(sha).into_inner()
2134                                 };
2135
2136                                 let public_key = if let Err(e) = new_pubkey.mul_assign(&self.secp_ctx, &blinding_factor[..]) {
2137                                         Err(e)
2138                                 } else { Ok(new_pubkey) };
2139
2140                                 let outgoing_packet = msgs::OnionPacket {
2141                                         version: 0,
2142                                         public_key,
2143                                         hop_data: new_packet_bytes,
2144                                         hmac: next_hop_hmac.clone(),
2145                                 };
2146
2147                                 let short_channel_id = match next_hop_data.format {
2148                                         msgs::OnionHopDataFormat::Legacy { short_channel_id } => short_channel_id,
2149                                         msgs::OnionHopDataFormat::NonFinalNode { short_channel_id } => short_channel_id,
2150                                         msgs::OnionHopDataFormat::FinalNode { .. } => {
2151                                                 return_err!("Final Node OnionHopData provided for us as an intermediary node", 0x4000 | 22, &[0;0]);
2152                                         },
2153                                 };
2154
2155                                 PendingHTLCStatus::Forward(PendingHTLCInfo {
2156                                         routing: PendingHTLCRouting::Forward {
2157                                                 onion_packet: outgoing_packet,
2158                                                 short_channel_id,
2159                                         },
2160                                         payment_hash: msg.payment_hash.clone(),
2161                                         incoming_shared_secret: shared_secret,
2162                                         amt_to_forward: next_hop_data.amt_to_forward,
2163                                         outgoing_cltv_value: next_hop_data.outgoing_cltv_value,
2164                                 })
2165                         }
2166                 };
2167
2168                 channel_state = Some(self.channel_state.lock().unwrap());
2169                 if let &PendingHTLCStatus::Forward(PendingHTLCInfo { ref routing, ref amt_to_forward, ref outgoing_cltv_value, .. }) = &pending_forward_info {
2170                         // If short_channel_id is 0 here, we'll reject the HTLC as there cannot be a channel
2171                         // with a short_channel_id of 0. This is important as various things later assume
2172                         // short_channel_id is non-0 in any ::Forward.
2173                         if let &PendingHTLCRouting::Forward { ref short_channel_id, .. } = routing {
2174                                 let id_option = channel_state.as_ref().unwrap().short_to_id.get(&short_channel_id).cloned();
2175                                 if let Some((err, code, chan_update)) = loop {
2176                                         let forwarding_id_opt = match id_option {
2177                                                 None => { // unknown_next_peer
2178                                                         // Note that this is likely a timing oracle for detecting whether an scid is a
2179                                                         // phantom.
2180                                                         if fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, *short_channel_id) {
2181                                                                 None
2182                                                         } else {
2183                                                                 break Some(("Don't have available channel for forwarding as requested.", 0x4000 | 10, None));
2184                                                         }
2185                                                 },
2186                                                 Some(id) => Some(id.clone()),
2187                                         };
2188                                         let (chan_update_opt, forwardee_cltv_expiry_delta) = if let Some(forwarding_id) = forwarding_id_opt {
2189                                                 let chan = channel_state.as_mut().unwrap().by_id.get_mut(&forwarding_id).unwrap();
2190                                                 if !chan.should_announce() && !self.default_configuration.accept_forwards_to_priv_channels {
2191                                                         // Note that the behavior here should be identical to the above block - we
2192                                                         // should NOT reveal the existence or non-existence of a private channel if
2193                                                         // we don't allow forwards outbound over them.
2194                                                         break Some(("Refusing to forward to a private channel based on our config.", 0x4000 | 10, None));
2195                                                 }
2196                                                 if chan.get_channel_type().supports_scid_privacy() && *short_channel_id != chan.outbound_scid_alias() {
2197                                                         // `option_scid_alias` (referred to in LDK as `scid_privacy`) means
2198                                                         // "refuse to forward unless the SCID alias was used", so we pretend
2199                                                         // we don't have the channel here.
2200                                                         break Some(("Refusing to forward over real channel SCID as our counterparty requested.", 0x4000 | 10, None));
2201                                                 }
2202                                                 let chan_update_opt = self.get_channel_update_for_onion(*short_channel_id, chan).ok();
2203
2204                                                 // Note that we could technically not return an error yet here and just hope
2205                                                 // that the connection is reestablished or monitor updated by the time we get
2206                                                 // around to doing the actual forward, but better to fail early if we can and
2207                                                 // hopefully an attacker trying to path-trace payments cannot make this occur
2208                                                 // on a small/per-node/per-channel scale.
2209                                                 if !chan.is_live() { // channel_disabled
2210                                                         break Some(("Forwarding channel is not in a ready state.", 0x1000 | 20, chan_update_opt));
2211                                                 }
2212                                                 if *amt_to_forward < chan.get_counterparty_htlc_minimum_msat() { // amount_below_minimum
2213                                                         break Some(("HTLC amount was below the htlc_minimum_msat", 0x1000 | 11, chan_update_opt));
2214                                                 }
2215                                                 let fee = amt_to_forward.checked_mul(chan.get_fee_proportional_millionths() as u64)
2216                                                         .and_then(|prop_fee| { (prop_fee / 1000000)
2217                                                         .checked_add(chan.get_outbound_forwarding_fee_base_msat() as u64) });
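                                                // Worked example with illustrative (non-normative) numbers: with
                                                // fee_base_msat = 1_000 and fee_proportional_millionths = 100, forwarding
                                                // amt_to_forward = 1_000_000 msat requires a fee of
                                                // 1_000_000 * 100 / 1_000_000 + 1_000 = 1_100 msat, so msg.amount_msat must
                                                // be at least 1_001_100 msat or the fee_insufficient check below fails the
                                                // HTLC back.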
2218                                                 if fee.is_none() || msg.amount_msat < fee.unwrap() || (msg.amount_msat - fee.unwrap()) < *amt_to_forward { // fee_insufficient
2219                                                         break Some(("Prior hop has deviated from specified fees parameters or origin node has obsolete ones", 0x1000 | 12, chan_update_opt));
2220                                                 }
2221                                                 (chan_update_opt, chan.get_cltv_expiry_delta())
2222                                         } else { (None, MIN_CLTV_EXPIRY_DELTA) };
2223
2224                                         if (msg.cltv_expiry as u64) < (*outgoing_cltv_value) as u64 + forwardee_cltv_expiry_delta as u64 { // incorrect_cltv_expiry
2225                                                 break Some(("Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta", 0x1000 | 13, chan_update_opt));
2226                                         }
2227                                         let cur_height = self.best_block.read().unwrap().height() + 1;
2228                                         // Theoretically, our channel counterparty shouldn't send us an HTLC expiring now,
2229                                         // but we want to be robust wrt counterparty packet sanitization (see
2230                                         // HTLC_FAIL_BACK_BUFFER rationale).
2231                                         if msg.cltv_expiry <= cur_height + HTLC_FAIL_BACK_BUFFER as u32 { // expiry_too_soon
2232                                                 break Some(("CLTV expiry is too close", 0x1000 | 14, chan_update_opt));
2233                                         }
2234                                         if msg.cltv_expiry > cur_height + CLTV_FAR_FAR_AWAY as u32 { // expiry_too_far
2235                                                 break Some(("CLTV expiry is too far in the future", 21, None));
2236                                         }
2237                                         // If the HTLC expires ~now, don't bother trying to forward it to our
2238                                         // counterparty. They should fail it anyway, but we don't want to bother with
2239                                         // the round-trips or risk them deciding they definitely want the HTLC and
2240                                         // force-closing to ensure they get it if we're offline.
2241                                         // We previously had a much more aggressive check here which tried to ensure
2242                                         // our counterparty receives an HTLC which has *our* risk threshold met on it,
2243                                         // but there is no need to do that, and since we're a bit conservative with our
2244                                         // risk threshold it just results in failing to forward payments.
2245                                         if (*outgoing_cltv_value) as u64 <= (cur_height + LATENCY_GRACE_PERIOD_BLOCKS) as u64 {
2246                                                 break Some(("Outgoing CLTV value is too soon", 0x1000 | 14, chan_update_opt));
2247                                         }
2248
2249                                         break None;
2250                                 }
2251                                 {
2252                                         let mut res = VecWriter(Vec::with_capacity(chan_update.serialized_length() + 2 + 8 + 2));
2253                                         if let Some(chan_update) = chan_update {
2254                                                 if code == 0x1000 | 11 || code == 0x1000 | 12 {
2255                                                         msg.amount_msat.write(&mut res).expect("Writes cannot fail");
2256                                                 }
2257                                                 else if code == 0x1000 | 13 {
2258                                                         msg.cltv_expiry.write(&mut res).expect("Writes cannot fail");
2259                                                 }
2260                                                 else if code == 0x1000 | 20 {
2261                                                         // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
2262                                                         0u16.write(&mut res).expect("Writes cannot fail");
2263                                                 }
2264                                                 (chan_update.serialized_length() as u16 + 2).write(&mut res).expect("Writes cannot fail");
2265                                                 msgs::ChannelUpdate::TYPE.write(&mut res).expect("Writes cannot fail");
2266                                                 chan_update.write(&mut res).expect("Writes cannot fail");
2267                                         }
2268                                         return_err!(err, code, &res.0[..]);
2269                                 }
2270                         }
2271                 }
2272
2273                 (pending_forward_info, channel_state.unwrap())
2274         }
2275
2276         /// Gets the current channel_update for the given channel. This first checks if the channel is
2277         /// public, and thus should be called whenever the result is going to be passed out in a
2278         /// [`MessageSendEvent::BroadcastChannelUpdate`] event.
2279         ///
2280         /// May be called with channel_state already locked!
2281         fn get_channel_update_for_broadcast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
2282                 if !chan.should_announce() {
2283                         return Err(LightningError {
2284                                 err: "Cannot broadcast a channel_update for a private channel".to_owned(),
2285                                 action: msgs::ErrorAction::IgnoreError
2286                         });
2287                 }
2288                 log_trace!(self.logger, "Attempting to generate broadcast channel update for channel {}", log_bytes!(chan.channel_id()));
2289                 self.get_channel_update_for_unicast(chan)
2290         }
2291
2292         /// Gets the current channel_update for the given channel. This does not check if the channel
2293         /// is public (only returning an Err if the channel does not yet have an assigned short_id),
2294         /// and thus MUST NOT be called unless the recipient of the resulting message has already
2295         /// provided evidence that they know about the existence of the channel.
2296         /// May be called with channel_state already locked!
2297         fn get_channel_update_for_unicast(&self, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
2298                 log_trace!(self.logger, "Attempting to generate channel update for channel {}", log_bytes!(chan.channel_id()));
2299                 let short_channel_id = match chan.get_short_channel_id() {
2300                         None => return Err(LightningError{err: "Channel not yet established".to_owned(), action: msgs::ErrorAction::IgnoreError}),
2301                         Some(id) => id,
2302                 };
2303
2304                 self.get_channel_update_for_onion(short_channel_id, chan)
2305         }
2306         fn get_channel_update_for_onion(&self, short_channel_id: u64, chan: &Channel<Signer>) -> Result<msgs::ChannelUpdate, LightningError> {
2307                 log_trace!(self.logger, "Generating channel update for channel {}", log_bytes!(chan.channel_id()));
2308                 let were_node_one = PublicKey::from_secret_key(&self.secp_ctx, &self.our_network_key).serialize()[..] < chan.get_counterparty_node_id().serialize()[..];
2309
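                // Per BOLT 7's channel_flags, the low bit of `flags` encodes the direction (0 if
                // we are `node_id_1`, 1 if we are `node_id_2`) and the next bit marks the channel
                // as disabled, which we set whenever the channel isn't currently usable
                // (`!is_live()`).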
2310                 let unsigned = msgs::UnsignedChannelUpdate {
2311                         chain_hash: self.genesis_hash,
2312                         short_channel_id,
2313                         timestamp: chan.get_update_time_counter(),
2314                         flags: (!were_node_one) as u8 | ((!chan.is_live() as u8) << 1),
2315                         cltv_expiry_delta: chan.get_cltv_expiry_delta(),
2316                         htlc_minimum_msat: chan.get_counterparty_htlc_minimum_msat(),
2317                         htlc_maximum_msat: OptionalField::Present(chan.get_announced_htlc_max_msat()),
2318                         fee_base_msat: chan.get_outbound_forwarding_fee_base_msat(),
2319                         fee_proportional_millionths: chan.get_fee_proportional_millionths(),
2320                         excess_data: Vec::new(),
2321                 };
2322
2323                 let msg_hash = Sha256dHash::hash(&unsigned.encode()[..]);
2324                 let sig = self.secp_ctx.sign_ecdsa(&hash_to_message!(&msg_hash[..]), &self.our_network_key);
2325
2326                 Ok(msgs::ChannelUpdate {
2327                         signature: sig,
2328                         contents: unsigned
2329                 })
2330         }
2331
2332         // Only public for testing; this should otherwise never be called directly.
2333         pub(crate) fn send_payment_along_path(&self, path: &Vec<RouteHop>, payment_params: &Option<PaymentParameters>, payment_hash: &PaymentHash, payment_secret: &Option<PaymentSecret>, total_value: u64, cur_height: u32, payment_id: PaymentId, keysend_preimage: &Option<PaymentPreimage>) -> Result<(), APIError> {
2334                 log_trace!(self.logger, "Attempting to send payment for path with next hop {}", path.first().unwrap().short_channel_id);
2335                 let prng_seed = self.keys_manager.get_secure_random_bytes();
2336                 let session_priv_bytes = self.keys_manager.get_secure_random_bytes();
2337                 let session_priv = SecretKey::from_slice(&session_priv_bytes[..]).expect("RNG is busted");
2338
2339                 let onion_keys = onion_utils::construct_onion_keys(&self.secp_ctx, &path, &session_priv)
2340                         .map_err(|_| APIError::RouteError{err: "Pubkey along hop was maliciously selected"})?;
2341                 let (onion_payloads, htlc_msat, htlc_cltv) = onion_utils::build_onion_payloads(path, total_value, payment_secret, cur_height, keysend_preimage)?;
2342                 if onion_utils::route_size_insane(&onion_payloads) {
2343                         return Err(APIError::RouteError{err: "Route size too large considering onion data"});
2344                 }
2345                 let onion_packet = onion_utils::construct_onion_packet(onion_payloads, onion_keys, prng_seed, payment_hash);
2346
2347                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2348
2349                 let err: Result<(), _> = loop {
2350                         let mut channel_lock = self.channel_state.lock().unwrap();
2351
2352                         let mut pending_outbounds = self.pending_outbound_payments.lock().unwrap();
2353                         let payment_entry = pending_outbounds.entry(payment_id);
2354                         if let hash_map::Entry::Occupied(payment) = &payment_entry {
2355                                 if !payment.get().is_retryable() {
2356                                         return Err(APIError::RouteError {
2357                                                 err: "Payment already completed"
2358                                         });
2359                                 }
2360                         }
2361
2362                         let id = match channel_lock.short_to_id.get(&path.first().unwrap().short_channel_id) {
2363                                 None => return Err(APIError::ChannelUnavailable{err: "No channel available with first hop!".to_owned()}),
2364                                 Some(id) => id.clone(),
2365                         };
2366
2367                         macro_rules! insert_outbound_payment {
2368                                 () => {
2369                                         let payment = payment_entry.or_insert_with(|| PendingOutboundPayment::Retryable {
2370                                                 session_privs: HashSet::new(),
2371                                                 pending_amt_msat: 0,
2372                                                 pending_fee_msat: Some(0),
2373                                                 payment_hash: *payment_hash,
2374                                                 payment_secret: *payment_secret,
2375                                                 starting_block_height: self.best_block.read().unwrap().height(),
2376                                                 total_msat: total_value,
2377                                         });
2378                                         assert!(payment.insert(session_priv_bytes, path));
2379                                 }
2380                         }
2381
2382                         let channel_state = &mut *channel_lock;
2383                         if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(id) {
2384                                 match {
2385                                         if chan.get().get_counterparty_node_id() != path.first().unwrap().pubkey {
2386                                                 return Err(APIError::RouteError{err: "Node ID mismatch on first hop!"});
2387                                         }
2388                                         if !chan.get().is_live() {
2389                                                 return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected/pending monitor update!".to_owned()});
2390                                         }
2391                                         break_chan_entry!(self, chan.get_mut().send_htlc_and_commit(
2392                                                 htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute {
2393                                                         path: path.clone(),
2394                                                         session_priv: session_priv.clone(),
2395                                                         first_hop_htlc_msat: htlc_msat,
2396                                                         payment_id,
2397                                                         payment_secret: payment_secret.clone(),
2398                                                         payment_params: payment_params.clone(),
2399                                                 }, onion_packet, &self.logger),
2400                                         channel_state, chan)
2401                                 } {
2402                                         Some((update_add, commitment_signed, monitor_update)) => {
2403                                                 if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
2404                                                         maybe_break_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true);
2405                                                         // Note that MonitorUpdateFailed here indicates (per function docs)
2406                                                         // that we will resend the commitment update once monitor updating
2407                                                         // is restored. Therefore, we must return an error indicating that
2408                                                         // it is unsafe to retry the payment wholesale, which we do in the
2409                                                         // send_payment check for MonitorUpdateFailed, below.
2410                                                         insert_outbound_payment!(); // Only do this after possibly break'ing on Perm failure above.
2411                                                         return Err(APIError::MonitorUpdateFailed);
2412                                                 }
2413                                                 insert_outbound_payment!();
2414
2415                                                 log_debug!(self.logger, "Sending payment along path resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id()));
2416                                                 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
2417                                                         node_id: path.first().unwrap().pubkey,
2418                                                         updates: msgs::CommitmentUpdate {
2419                                                                 update_add_htlcs: vec![update_add],
2420                                                                 update_fulfill_htlcs: Vec::new(),
2421                                                                 update_fail_htlcs: Vec::new(),
2422                                                                 update_fail_malformed_htlcs: Vec::new(),
2423                                                                 update_fee: None,
2424                                                                 commitment_signed,
2425                                                         },
2426                                                 });
2427                                         },
2428                                         None => { insert_outbound_payment!(); },
2429                                 }
2430                         } else { unreachable!(); }
2431                         return Ok(());
2432                 };
2433
2434                 match handle_error!(self, err, path.first().unwrap().pubkey) {
2435                         Ok(_) => unreachable!(),
2436                         Err(e) => {
2437                                 Err(APIError::ChannelUnavailable { err: e.err })
2438                         },
2439                 }
2440         }
2441
2442         /// Sends a payment along a given route.
2443         ///
2444         /// Value parameters are provided via the last hop in the route; see the documentation for
2445         /// RouteHop fields for more info.
2446         ///
2447         /// Note that if the payment_hash already exists elsewhere (eg you're sending a duplicative
2448         /// payment), we don't do anything to stop you! We always try to ensure that if the provided
2449         /// next hop knows the preimage to payment_hash they can claim an additional amount as
2450         /// specified in the last hop in the route! Thus, you should probably do your own
2451         /// payment_preimage tracking (which you should already be doing as they represent "proof of
2452         /// payment") and prevent double-sends yourself.
2453         ///
2454         /// May generate SendHTLCs message(s) event on success, which should be relayed.
2455         ///
2456         /// Each path may have a different return value, and PaymentSendFailure may contain a Vec with
2457         /// each entry matching the corresponding-index entry in the route paths; see
2458         /// PaymentSendFailure for more info.
2459         ///
2460         /// In general, a path may raise:
2461         ///  * APIError::RouteError when an invalid route or forwarding parameter (cltv_delta, fee,
2462         ///    node public key) is specified.
2463         ///  * APIError::ChannelUnavailable if the next-hop channel is not available for updates
2464         ///    (including due to previous monitor update failure or new permanent monitor update
2465         ///    failure).
2466         ///  * APIError::MonitorUpdateFailed if a new monitor update failure prevented sending the
2467         ///    relevant updates.
2468         ///
2469         /// Note that depending on the type of the PaymentSendFailure the HTLC may have been
2470         /// irrevocably committed to on our end. In such a case, do NOT retry the payment with a
2471         /// different route unless you intend to pay twice!
2472         ///
2473         /// payment_secret is unrelated to payment_hash (or PaymentPreimage) and exists to authenticate
2474         /// the sender to the recipient and prevent payment-probing (deanonymization) attacks. For
2475         /// newer nodes, it will be provided to you in the invoice. If you do not have one, the Route
2476         /// must not contain multiple paths as multi-path payments require a recipient-provided
2477         /// payment_secret.
2478         /// If a payment_secret *is* provided, we assume that the invoice had the payment_secret feature
2479         /// bit set (either as required or as available). If multiple paths are present in the Route,
2480         /// we assume the invoice had the basic_mpp feature set.
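        ///
        /// Hedged usage sketch (illustrative, not a doctest); a `channel_manager`, a `route` found
        /// via the router, and the recipient-provided `payment_hash`/`payment_secret` are assumed:
        ///
        /// ```ignore
        /// match channel_manager.send_payment(&route, payment_hash, &Some(payment_secret)) {
        ///     Ok(payment_id) => {
        ///         // Keep the PaymentId so the payment can later be retried or abandoned.
        ///     },
        ///     Err(_e) => {
        ///         // See PaymentSendFailure: some variants mean HTLCs may already be in flight,
        ///         // in which case the payment must not be blindly re-sent over another route.
        ///     },
        /// }
        /// ```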
2481         pub fn send_payment(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>) -> Result<PaymentId, PaymentSendFailure> {
2482                 self.send_payment_internal(route, payment_hash, payment_secret, None, None, None)
2483         }
2484
2485         fn send_payment_internal(&self, route: &Route, payment_hash: PaymentHash, payment_secret: &Option<PaymentSecret>, keysend_preimage: Option<PaymentPreimage>, payment_id: Option<PaymentId>, recv_value_msat: Option<u64>) -> Result<PaymentId, PaymentSendFailure> {
2486                 if route.paths.len() < 1 {
2487                         return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "There must be at least one path to send over"}));
2488                 }
2489                 if route.paths.len() > 10 {
2490                         // This limit is completely arbitrary - there aren't any real fundamental path-count
2491                         // limits. After we support retrying individual paths we should likely bump this, but
2492                 // for now more than 10 paths likely carries too much risk of a single-path failure.
2493                         return Err(PaymentSendFailure::ParameterError(APIError::RouteError{err: "Sending over more than 10 paths is not currently supported"}));
2494                 }
2495                 if payment_secret.is_none() && route.paths.len() > 1 {
2496                         return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError{err: "Payment secret is required for multi-path payments".to_string()}));
2497                 }
2498                 let mut total_value = 0;
2499                 let our_node_id = self.get_our_node_id();
2500                 let mut path_errs = Vec::with_capacity(route.paths.len());
2501                 let payment_id = if let Some(id) = payment_id { id } else { PaymentId(self.keys_manager.get_secure_random_bytes()) };
2502                 'path_check: for path in route.paths.iter() {
2503                         if path.len() < 1 || path.len() > 20 {
2504                                 path_errs.push(Err(APIError::RouteError{err: "Path didn't go anywhere/had bogus size"}));
2505                                 continue 'path_check;
2506                         }
2507                         for (idx, hop) in path.iter().enumerate() {
2508                                 if idx != path.len() - 1 && hop.pubkey == our_node_id {
2509                                         path_errs.push(Err(APIError::RouteError{err: "Path went through us but wasn't a simple rebalance loop to us"}));
2510                                         continue 'path_check;
2511                                 }
2512                         }
2513                         total_value += path.last().unwrap().fee_msat;
2514                         path_errs.push(Ok(()));
2515                 }
2516                 if path_errs.iter().any(|e| e.is_err()) {
2517                         return Err(PaymentSendFailure::PathParameterError(path_errs));
2518                 }
2519                 if let Some(amt_msat) = recv_value_msat {
2520                         debug_assert!(amt_msat >= total_value);
2521                         total_value = amt_msat;
2522                 }
2523
2524                 let cur_height = self.best_block.read().unwrap().height() + 1;
2525                 let mut results = Vec::new();
2526                 for path in route.paths.iter() {
2527                         results.push(self.send_payment_along_path(&path, &route.payment_params, &payment_hash, payment_secret, total_value, cur_height, payment_id, &keysend_preimage));
2528                 }
2529                 let mut has_ok = false;
2530                 let mut has_err = false;
2531                 let mut pending_amt_unsent = 0;
2532                 let mut max_unsent_cltv_delta = 0;
2533                 for (res, path) in results.iter().zip(route.paths.iter()) {
2534                         if res.is_ok() { has_ok = true; }
2535                         if res.is_err() { has_err = true; }
2536                         if let &Err(APIError::MonitorUpdateFailed) = res {
2537                                 // MonitorUpdateFailed is inherently unsafe to retry, so we call it a
2538                                 // PartialFailure.
2539                                 has_err = true;
2540                                 has_ok = true;
2541                         } else if res.is_err() {
2542                                 pending_amt_unsent += path.last().unwrap().fee_msat;
2543                                 max_unsent_cltv_delta = cmp::max(max_unsent_cltv_delta, path.last().unwrap().cltv_expiry_delta);
2544                         }
2545                 }
2546                 if has_err && has_ok {
2547                         Err(PaymentSendFailure::PartialFailure {
2548                                 results,
2549                                 payment_id,
2550                                 failed_paths_retry: if pending_amt_unsent != 0 {
2551                                         if let Some(payment_params) = &route.payment_params {
2552                                                 Some(RouteParameters {
2553                                                         payment_params: payment_params.clone(),
2554                                                         final_value_msat: pending_amt_unsent,
2555                                                         final_cltv_expiry_delta: max_unsent_cltv_delta,
2556                                                 })
2557                                         } else { None }
2558                                 } else { None },
2559                         })
2560                 } else if has_err {
2561                         // If we failed to send any paths, we shouldn't have inserted the new PaymentId into
2562                         // our `pending_outbound_payments` map at all.
2563                         debug_assert!(self.pending_outbound_payments.lock().unwrap().get(&payment_id).is_none());
2564                         Err(PaymentSendFailure::AllFailedRetrySafe(results.drain(..).map(|r| r.unwrap_err()).collect()))
2565                 } else {
2566                         Ok(payment_id)
2567                 }
2568         }
2569
2570         /// Retries a payment along the given [`Route`].
2571         ///
2572         /// Errors returned are a superset of those returned from [`send_payment`], so see
2573         /// [`send_payment`] documentation for more details on errors. This method will also error if the
2574         /// retry amount puts the payment more than 10% over the payment's total amount, if the payment
2575         /// for the given `payment_id` cannot be found (likely due to timeout or success), or if
2576         /// further retries have been disabled with [`abandon_payment`].
2577         ///
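             /// As an illustration only (not taken from the original documentation): a failed path may be
             /// reported with an optional `retry` [`RouteParameters`] which can be fed back into a router;
             /// the event field names and the `find_route` helper below are assumptions, not confirmed
             /// LDK APIs.
             ///
             /// ```ignore
             /// // Hypothetical event-handler sketch.
             /// if let Event::PaymentPathFailed { payment_id: Some(payment_id), retry: Some(retry), .. } = event {
             ///     // `find_route` stands in for whatever router the application uses.
             ///     let route = find_route(&retry)?;
             ///     channel_manager.retry_payment(&route, payment_id)?;
             /// }
             /// ```
             ///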
2578         /// [`send_payment`]: Self::send_payment
2579         /// [`abandon_payment`]: Self::abandon_payment
2580         pub fn retry_payment(&self, route: &Route, payment_id: PaymentId) -> Result<(), PaymentSendFailure> {
2581                 const RETRY_OVERFLOW_PERCENTAGE: u64 = 10;
2582                 for path in route.paths.iter() {
2583                         if path.len() == 0 {
2584                                 return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
2585                                         err: "length-0 path in route".to_string()
2586                                 }))
2587                         }
2588                 }
2589
2590                 let (total_msat, payment_hash, payment_secret) = {
2591                         let outbounds = self.pending_outbound_payments.lock().unwrap();
2592                         if let Some(payment) = outbounds.get(&payment_id) {
2593                                 match payment {
2594                                         PendingOutboundPayment::Retryable {
2595                                                 total_msat, payment_hash, payment_secret, pending_amt_msat, ..
2596                                         } => {
2597                                                 let retry_amt_msat: u64 = route.paths.iter().map(|path| path.last().unwrap().fee_msat).sum();
2598                                                 if retry_amt_msat + *pending_amt_msat > *total_msat * (100 + RETRY_OVERFLOW_PERCENTAGE) / 100 {
2599                                                         return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
2600                                                                 err: format!("retry_amt_msat of {} will put pending_amt_msat (currently: {}) more than 10% over total_payment_amt_msat of {}", retry_amt_msat, pending_amt_msat, total_msat).to_string()
2601                                                         }))
2602                                                 }
2603                                                 (*total_msat, *payment_hash, *payment_secret)
2604                                         },
2605                                         PendingOutboundPayment::Legacy { .. } => {
2606                                                 return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
2607                                                         err: "Unable to retry payments that were initially sent on LDK versions prior to 0.0.102".to_string()
2608                                                 }))
2609                                         },
2610                                         PendingOutboundPayment::Fulfilled { .. } => {
2611                                                 return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
2612                                                         err: "Payment already completed".to_owned()
2613                                                 }));
2614                                         },
2615                                         PendingOutboundPayment::Abandoned { .. } => {
2616                                                 return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
2617                                                         err: "Payment already abandoned (with some HTLCs still pending)".to_owned()
2618                                                 }));
2619                                         },
2620                                 }
2621                         } else {
2622                                 return Err(PaymentSendFailure::ParameterError(APIError::APIMisuseError {
2623                                         err: format!("Payment with ID {} not found", log_bytes!(payment_id.0)),
2624                                 }))
2625                         }
2626                 };
2627                 return self.send_payment_internal(route, payment_hash, &payment_secret, None, Some(payment_id), Some(total_msat)).map(|_| ())
2628         }
2629
2630         /// Signals that no further retries for the given payment will occur.
2631         ///
2632         /// After this method returns, any future calls to [`retry_payment`] for the given `payment_id`
2633         /// will fail with [`PaymentSendFailure::ParameterError`]. If an [`Event::PaymentFailed`] event
2634         /// has not already been generated for this payment, one will be generated as soon as there
2635         /// are no remaining pending HTLCs for this payment.
2636         ///
2637         /// Note that calling this method does *not* prevent a payment from succeeding. You must still
2638         /// wait until you receive either a [`Event::PaymentFailed`] or [`Event::PaymentSent`] event to
2639         /// determine the ultimate status of a payment.
2640         ///
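             /// A rough usage sketch (the application-side bookkeeping described in the comments is
             /// assumed, not part of LDK):
             ///
             /// ```ignore
             /// // Give up on retrying this payment...
             /// channel_manager.abandon_payment(payment_id);
             /// // ...then wait for the event handler to see either Event::PaymentFailed { payment_id, .. }
             /// // or Event::PaymentSent for this payment before recording its final state.
             /// ```
             ///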
2641         /// [`retry_payment`]: Self::retry_payment
2642         /// [`Event::PaymentFailed`]: events::Event::PaymentFailed
2643         /// [`Event::PaymentSent`]: events::Event::PaymentSent
2644         pub fn abandon_payment(&self, payment_id: PaymentId) {
2645                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2646
2647                 let mut outbounds = self.pending_outbound_payments.lock().unwrap();
2648                 if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
2649                         if let Ok(()) = payment.get_mut().mark_abandoned() {
2650                                 if payment.get().remaining_parts() == 0 {
2651                                         self.pending_events.lock().unwrap().push(events::Event::PaymentFailed {
2652                                                 payment_id,
2653                                                 payment_hash: payment.get().payment_hash().expect("PendingOutboundPayment::Abandoned always has a payment hash set"),
2654                                         });
2655                                         payment.remove();
2656                                 }
2657                         }
2658                 }
2659         }
2660
2661         /// Send a spontaneous payment, which is a payment that does not require the recipient to have
2662         /// generated an invoice. Optionally, you may specify the preimage. If you do choose to specify
2663         /// the preimage, it must be a cryptographically secure random value that no intermediate node
2664         /// would be able to guess -- otherwise, an intermediate node may claim the payment and it will
2665         /// never reach the recipient.
2666         ///
2667         /// See [`send_payment`] documentation for more details on the return value of this function.
2668         ///
2669         /// Similar to regular payments, you MUST NOT reuse a `payment_preimage` value. See
2670         /// [`send_payment`] for more information about the risks of duplicate preimage usage.
2671         ///
2672         /// Note that `route` must have exactly one path.
2673         ///
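             /// A minimal sketch (for illustration; `route_to_recipient` is a hypothetical helper, not an
             /// LDK API):
             ///
             /// ```ignore
             /// // Passing `None` lets LDK draw a fresh, secure preimage; the returned hash identifies the
             /// // payment in subsequent events.
             /// let route = route_to_recipient(recipient_node_id, amount_msat)?;
             /// let (payment_hash, payment_id) = channel_manager.send_spontaneous_payment(&route, None)?;
             /// ```
             ///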
2674         /// [`send_payment`]: Self::send_payment
2675         pub fn send_spontaneous_payment(&self, route: &Route, payment_preimage: Option<PaymentPreimage>) -> Result<(PaymentHash, PaymentId), PaymentSendFailure> {
2676                 let preimage = match payment_preimage {
2677                         Some(p) => p,
2678                         None => PaymentPreimage(self.keys_manager.get_secure_random_bytes()),
2679                 };
2680                 let payment_hash = PaymentHash(Sha256::hash(&preimage.0).into_inner());
2681                 match self.send_payment_internal(route, payment_hash, &None, Some(preimage), None, None) {
2682                         Ok(payment_id) => Ok((payment_hash, payment_id)),
2683                         Err(e) => Err(e)
2684                 }
2685         }
2686
2687         /// Handles the generation of a funding transaction, optionally (for tests) with a function
2688         /// which checks the correctness of the funding transaction given the associated channel.
2689         fn funding_transaction_generated_intern<FundingOutput: Fn(&Channel<Signer>, &Transaction) -> Result<OutPoint, APIError>>(
2690                 &self, temporary_channel_id: &[u8; 32], _counterparty_node_id: &PublicKey, funding_transaction: Transaction, find_funding_output: FundingOutput
2691         ) -> Result<(), APIError> {
2692                 let (chan, msg) = {
2693                         let (res, chan) = match self.channel_state.lock().unwrap().by_id.remove(temporary_channel_id) {
2694                                 Some(mut chan) => {
2695                                         let funding_txo = find_funding_output(&chan, &funding_transaction)?;
2696
2697                                         (chan.get_outbound_funding_created(funding_transaction, funding_txo, &self.logger)
2698                                                 .map_err(|e| if let ChannelError::Close(msg) = e {
2699                                                         MsgHandleErrInternal::from_finish_shutdown(msg, chan.channel_id(), chan.get_user_id(), chan.force_shutdown(true), None)
2700                                                 } else { unreachable!(); })
2701                                         , chan)
2702                                 },
2703                                 None => { return Err(APIError::ChannelUnavailable { err: "No such channel".to_owned() }) },
2704                         };
2705                         match handle_error!(self, res, chan.get_counterparty_node_id()) {
2706                                 Ok(funding_msg) => {
2707                                         (chan, funding_msg)
2708                                 },
2709                                 Err(_) => { return Err(APIError::ChannelUnavailable {
2710                                         err: "Error deriving keys or signing initial commitment transactions - either our RNG or our counterparty's RNG is broken or the Signer refused to sign".to_owned()
2711                                 }) },
2712                         }
2713                 };
2714
2715                 let mut channel_state = self.channel_state.lock().unwrap();
2716                 channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingCreated {
2717                         node_id: chan.get_counterparty_node_id(),
2718                         msg,
2719                 });
2720                 match channel_state.by_id.entry(chan.channel_id()) {
2721                         hash_map::Entry::Occupied(_) => {
2722                                 panic!("Generated duplicate funding txid?");
2723                         },
2724                         hash_map::Entry::Vacant(e) => {
2725                                 e.insert(chan);
2726                         }
2727                 }
2728                 Ok(())
2729         }
2730
2731         #[cfg(test)]
2732         pub(crate) fn funding_transaction_generated_unchecked(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction, output_index: u16) -> Result<(), APIError> {
2733                 self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |_, tx| {
2734                         Ok(OutPoint { txid: tx.txid(), index: output_index })
2735                 })
2736         }
2737
2738         /// Call this upon creation of a funding transaction for the given channel.
2739         ///
2740         /// Returns an [`APIError::APIMisuseError`] if the funding_transaction spent non-SegWit outputs
2741         /// or if no output was found which matches the parameters in [`Event::FundingGenerationReady`].
2742         ///
2743         /// Returns [`APIError::ChannelUnavailable`] if a funding transaction has already been provided
2744         /// for the channel or if the channel has been closed as indicated by [`Event::ChannelClosed`].
2745         ///
2746         /// May panic if the output found in the funding transaction is duplicative with some other
2747         /// channel (note that this should be trivially prevented by using unique funding transaction
2748         /// keys per-channel).
2749         ///
2750         /// Do NOT broadcast the funding transaction yourself. When we have safely received our
2751         /// counterparty's signature the funding transaction will automatically be broadcast via the
2752         /// [`BroadcasterInterface`] provided when this `ChannelManager` was constructed.
2753         ///
2754         /// Note that this includes RBF or similar transaction replacement strategies - lightning does
2755         /// not currently support replacing a funding transaction on an existing channel. Instead,
2756         /// create a new channel with a conflicting funding transaction.
2757         ///
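             /// For illustration, a sketch of the intended flow. The event fields used below and the
             /// wallet-side `build_funding_tx` helper are assumptions, and `counterparty_node_id` is
             /// assumed to be tracked by the caller:
             ///
             /// ```ignore
             /// if let Event::FundingGenerationReady {
             ///     temporary_channel_id, channel_value_satoshis, output_script, ..
             /// } = event {
             ///     // Construct, but do NOT broadcast, a transaction paying `channel_value_satoshis`
             ///     // to `output_script`.
             ///     let funding_tx = build_funding_tx(&output_script, channel_value_satoshis)?;
             ///     channel_manager.funding_transaction_generated(
             ///         &temporary_channel_id, &counterparty_node_id, funding_tx)?;
             /// }
             /// ```
             ///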
2758         /// [`Event::FundingGenerationReady`]: crate::util::events::Event::FundingGenerationReady
2759         /// [`Event::ChannelClosed`]: crate::util::events::Event::ChannelClosed
2760         pub fn funding_transaction_generated(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, funding_transaction: Transaction) -> Result<(), APIError> {
2761                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2762
2763                 for inp in funding_transaction.input.iter() {
2764                         if inp.witness.is_empty() {
2765                                 return Err(APIError::APIMisuseError {
2766                                         err: "Funding transaction must be fully signed and spend Segwit outputs".to_owned()
2767                                 });
2768                         }
2769                 }
2770                 self.funding_transaction_generated_intern(temporary_channel_id, counterparty_node_id, funding_transaction, |chan, tx| {
2771                         let mut output_index = None;
2772                         let expected_spk = chan.get_funding_redeemscript().to_v0_p2wsh();
2773                         for (idx, outp) in tx.output.iter().enumerate() {
2774                                 if outp.script_pubkey == expected_spk && outp.value == chan.get_value_satoshis() {
2775                                         if output_index.is_some() {
2776                                                 return Err(APIError::APIMisuseError {
2777                                                         err: "Multiple outputs matched the expected script and value".to_owned()
2778                                                 });
2779                                         }
2780                                         if idx > u16::max_value() as usize {
2781                                                 return Err(APIError::APIMisuseError {
2782                                                         err: "Transaction had more than 2^16 outputs, which is not supported".to_owned()
2783                                                 });
2784                                         }
2785                                         output_index = Some(idx as u16);
2786                                 }
2787                         }
2788                         if output_index.is_none() {
2789                                 return Err(APIError::APIMisuseError {
2790                                         err: "No output matched the script_pubkey and value in the FundingGenerationReady event".to_owned()
2791                                 });
2792                         }
2793                         Ok(OutPoint { txid: tx.txid(), index: output_index.unwrap() })
2794                 })
2795         }
2796
2797         #[allow(dead_code)]
2798         // Messages of up to 64KB should never end up more than half full with addresses, as that would
2799         // be absurd. We ensure this by checking that at least 500 (our stated public contract on when
2800         // broadcast_node_announcement panics) of the maximum-length addresses would fit in a 64KB
2801         // message...
2802         const HALF_MESSAGE_IS_ADDRS: u32 = ::core::u16::MAX as u32 / (NetAddress::MAX_LEN as u32 + 1) / 2;
2803         #[deny(const_err)]
2804         #[allow(dead_code)]
2805         // ...by failing to compile if the number of addresses that would be half of a message is
2806         // smaller than 500:
2807         const STATIC_ASSERT: u32 = Self::HALF_MESSAGE_IS_ADDRS - 500;
2808
2809         /// Regenerates channel_announcements and generates a signed node_announcement from the given
2810         /// arguments, providing them in corresponding events via
2811         /// [`get_and_clear_pending_msg_events`], if at least one public channel has been confirmed
2812         /// on-chain. This effectively re-broadcasts all channel announcements and sends our node
2813         /// announcement to ensure that the lightning P2P network is aware of the channels we have and
2814         /// our network addresses.
2815         ///
2816         /// `rgb` is a node "color" and `alias` is a printable human-readable string to describe this
2817         /// node to humans. They carry no in-protocol meaning.
2818         ///
2819         /// `addresses` represent the set (possibly empty) of socket addresses on which this node
2820         /// accepts incoming connections. These will be included in the node_announcement, publicly
2821         /// tying these addresses together and to this node. If you wish to preserve user privacy,
2822         /// addresses should likely contain only Tor Onion addresses.
2823         ///
2824         /// Panics if `addresses` is absurdly large (more than 500).
2825         ///
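             /// For illustration (the color, alias, and empty address list below are arbitrary example
             /// values):
             ///
             /// ```ignore
             /// let mut alias = [0u8; 32];
             /// alias[..7].copy_from_slice(b"my-node");
             /// // An empty address list is valid; it simply announces no publicly reachable addresses.
             /// channel_manager.broadcast_node_announcement([0x00, 0x47, 0xff], alias, Vec::new());
             /// ```
             ///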
2826         /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
2827         pub fn broadcast_node_announcement(&self, rgb: [u8; 3], alias: [u8; 32], mut addresses: Vec<NetAddress>) {
2828                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2829
2830                 if addresses.len() > 500 {
2831                         panic!("More than half the message size was taken up by public addresses!");
2832                 }
2833
2834                 // While all existing nodes handle unsorted addresses just fine, the spec requires that
2835                 // addresses be sorted for future compatibility.
2836                 addresses.sort_by_key(|addr| addr.get_id());
2837
2838                 let announcement = msgs::UnsignedNodeAnnouncement {
2839                         features: NodeFeatures::known(),
2840                         timestamp: self.last_node_announcement_serial.fetch_add(1, Ordering::AcqRel) as u32,
2841                         node_id: self.get_our_node_id(),
2842                         rgb, alias, addresses,
2843                         excess_address_data: Vec::new(),
2844                         excess_data: Vec::new(),
2845                 };
2846                 let msghash = hash_to_message!(&Sha256dHash::hash(&announcement.encode()[..])[..]);
2847                 let node_announce_sig = sign(&self.secp_ctx, &msghash, &self.our_network_key);
2848
2849                 let mut channel_state_lock = self.channel_state.lock().unwrap();
2850                 let channel_state = &mut *channel_state_lock;
2851
2852                 let mut announced_chans = false;
2853                 for (_, chan) in channel_state.by_id.iter() {
2854                         if let Some(msg) = chan.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height()) {
2855                                 channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
2856                                         msg,
2857                                         update_msg: match self.get_channel_update_for_broadcast(chan) {
2858                                                 Ok(msg) => msg,
2859                                                 Err(_) => continue,
2860                                         },
2861                                 });
2862                                 announced_chans = true;
2863                         } else {
2864                                 // If the channel is not public or has not yet reached funding_locked, check the
2865                                 // next channel. If we don't yet have any public channels, we'll skip the broadcast
2866                                 // below as peers may not accept it without channels on chain first.
2867                         }
2868                 }
2869
2870                 if announced_chans {
2871                         channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastNodeAnnouncement {
2872                                 msg: msgs::NodeAnnouncement {
2873                                         signature: node_announce_sig,
2874                                         contents: announcement
2875                                 },
2876                         });
2877                 }
2878         }
2879
2880         /// Processes HTLCs which are pending, waiting on a random forward delay.
2881         ///
2882         /// Should only really ever be called in response to a PendingHTLCsForwardable event.
2883         /// Will likely generate further events.
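             ///
             /// A sketch of typical usage (the event field name and the `schedule_after` timer helper are
             /// assumptions for illustration):
             ///
             /// ```ignore
             /// if let Event::PendingHTLCsForwardable { time_forwardable } = event {
             ///     // Wait at least `time_forwardable`, ideally somewhat longer and randomized, then let
             ///     // the ChannelManager actually build and send the queued forwards.
             ///     schedule_after(time_forwardable, || channel_manager.process_pending_htlc_forwards());
             /// }
             /// ```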
2884         pub fn process_pending_htlc_forwards(&self) {
2885                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
2886
2887                 let mut new_events = Vec::new();
2888                 let mut failed_forwards = Vec::new();
2889                 let mut phantom_receives: Vec<(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)> = Vec::new();
2890                 let mut handle_errors = Vec::new();
2891                 {
2892                         let mut channel_state_lock = self.channel_state.lock().unwrap();
2893                         let channel_state = &mut *channel_state_lock;
2894
2895                         for (short_chan_id, mut pending_forwards) in channel_state.forward_htlcs.drain() {
2896                                 if short_chan_id != 0 {
2897                                         let forward_chan_id = match channel_state.short_to_id.get(&short_chan_id) {
2898                                                 Some(chan_id) => chan_id.clone(),
2899                                                 None => {
2900                                                         for forward_info in pending_forwards.drain(..) {
2901                                                                 match forward_info {
2902                                                                         HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
2903                                                                                 routing, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
2904                                                                                 prev_funding_outpoint } => {
2905                                                                                         macro_rules! fail_forward {
2906                                                                                                 ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr) => {
2907                                                                                                         {
2908                                                                                                                 log_info!(self.logger, "Failed to accept/forward incoming HTLC: {}", $msg);
2909                                                                                                                 let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
2910                                                                                                                         short_channel_id: prev_short_channel_id,
2911                                                                                                                         outpoint: prev_funding_outpoint,
2912                                                                                                                         htlc_id: prev_htlc_id,
2913                                                                                                                         incoming_packet_shared_secret: incoming_shared_secret,
2914                                                                                                                         phantom_shared_secret: $phantom_ss,
2915                                                                                                                 });
2916                                                                                                                 failed_forwards.push((htlc_source, payment_hash,
2917                                                                                                                         HTLCFailReason::Reason { failure_code: $err_code, data: $err_data }
2918                                                                                                                 ));
2919                                                                                                                 continue;
2920                                                                                                         }
2921                                                                                                 }
2922                                                                                         }
2923                                                                                         if let PendingHTLCRouting::Forward { onion_packet, .. } = routing {
2924                                                                                                 let phantom_secret_res = self.keys_manager.get_node_secret(Recipient::PhantomNode);
2925                                                                                                 if phantom_secret_res.is_ok() && fake_scid::is_valid_phantom(&self.fake_scid_rand_bytes, short_chan_id) {
2926                                                                                                         let phantom_shared_secret = SharedSecret::new(&onion_packet.public_key.unwrap(), &phantom_secret_res.unwrap()).secret_bytes();
2927                                                                                                         let next_hop = match onion_utils::decode_next_hop(phantom_shared_secret, &onion_packet.hop_data, onion_packet.hmac, payment_hash) {
2928                                                                                                                 Ok(res) => res,
2929                                                                                                                 Err(onion_utils::OnionDecodeErr::Malformed { err_msg, err_code }) => {
2930                                                                                                                         let sha256_of_onion = Sha256::hash(&onion_packet.hop_data).into_inner();
2931                                                                                                                         // In this scenario, the phantom would have sent us an
2932                                                                                                                         // `update_fail_malformed_htlc`, meaning here we encrypt the error as
2933                                                                                                                         // if it came from us (the second-to-last hop) but contains the sha256
2934                                                                                                                         // of the onion.
2935                                                                                                                         fail_forward!(err_msg, err_code, sha256_of_onion.to_vec(), None);
2936                                                                                                                 },
2937                                                                                                                 Err(onion_utils::OnionDecodeErr::Relay { err_msg, err_code }) => {
2938                                                                                                                         fail_forward!(err_msg, err_code, Vec::new(), Some(phantom_shared_secret));
2939                                                                                                                 },
2940                                                                                                         };
2941                                                                                                         match next_hop {
2942                                                                                                                 onion_utils::Hop::Receive(hop_data) => {
2943                                                                                                                         match self.construct_recv_pending_htlc_info(hop_data, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value, Some(phantom_shared_secret)) {
2944                                                                                                                                 Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, vec![(info, prev_htlc_id)])),
2945                                                                                                                                 Err(ReceiveError { err_code, err_data, msg }) => fail_forward!(msg, err_code, err_data, Some(phantom_shared_secret))
2946                                                                                                                         }
2947                                                                                                                 },
2948                                                                                                                 _ => panic!(),
2949                                                                                                         }
2950                                                                                                 } else {
2951                                                                                                         fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
2952                                                                                                 }
2953                                                                                         } else {
2954                                                                                                 fail_forward!(format!("Unknown short channel id {} for forward HTLC", short_chan_id), 0x4000 | 10, Vec::new(), None);
2955                                                                                         }
2956                                                                                 },
2957                                                                         HTLCForwardInfo::FailHTLC { .. } => {
2958                                                                                 // Channel went away before we could fail it. This implies
2959                                                                                 // the channel is now on chain and our counterparty is
2960                                                                                 // trying to broadcast the HTLC-Timeout, but that's their
2961                                                                                 // problem, not ours.
2962                                                                         }
2963                                                                 }
2964                                                         }
2965                                                         continue;
2966                                                 }
2967                                         };
2968                                         if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(forward_chan_id) {
2969                                                 let mut add_htlc_msgs = Vec::new();
2970                                                 let mut fail_htlc_msgs = Vec::new();
2971                                                 for forward_info in pending_forwards.drain(..) {
2972                                                         match forward_info {
2973                                                                 HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
2974                                                                                 routing: PendingHTLCRouting::Forward {
2975                                                                                         onion_packet, ..
2976                                                                                 }, incoming_shared_secret, payment_hash, amt_to_forward, outgoing_cltv_value },
2977                                                                                 prev_funding_outpoint } => {
2978                                                                         log_trace!(self.logger, "Adding HTLC from short id {} with payment_hash {} to channel with short id {} after delay", prev_short_channel_id, log_bytes!(payment_hash.0), short_chan_id);
2979                                                                         let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData {
2980                                                                                 short_channel_id: prev_short_channel_id,
2981                                                                                 outpoint: prev_funding_outpoint,
2982                                                                                 htlc_id: prev_htlc_id,
2983                                                                                 incoming_packet_shared_secret: incoming_shared_secret,
2984                                                                                 // Phantom payments are only PendingHTLCRouting::Receive.
2985                                                                                 phantom_shared_secret: None,
2986                                                                         });
2987                                                                         match chan.get_mut().send_htlc(amt_to_forward, payment_hash, outgoing_cltv_value, htlc_source.clone(), onion_packet, &self.logger) {
2988                                                                                 Err(e) => {
2989                                                                                         if let ChannelError::Ignore(msg) = e {
2990                                                                                                 log_trace!(self.logger, "Failed to forward HTLC with payment_hash {}: {}", log_bytes!(payment_hash.0), msg);
2991                                                                                         } else {
2992                                                                                                 panic!("Stated return value requirements in send_htlc() were not met");
2993                                                                                         }
2994                                                                                         let (failure_code, data) = self.get_htlc_temp_fail_err_and_data(0x1000|7, short_chan_id, chan.get());
2995                                                                                         failed_forwards.push((htlc_source, payment_hash,
2996                                                                                                 HTLCFailReason::Reason { failure_code, data }
2997                                                                                         ));
2998                                                                                         continue;
2999                                                                                 },
3000                                                                                 Ok(update_add) => {
3001                                                                                         match update_add {
3002                                                                                                 Some(msg) => { add_htlc_msgs.push(msg); },
3003                                                                                                 None => {
3004                                                                                                         // Nothing to do here...we're waiting on a remote
3005                                                                                                         // revoke_and_ack before we can add any more HTLCs. The Channel
3006                                                                                                         // will automatically handle building the update_add_htlc and
3007                                                                                                         // commitment_signed messages when we can.
3008                                                                                                         // TODO: Do some kind of timer to set the channel as !is_live()
3009                                                                                                         // as we don't really want others relying on us relaying through
3010                                                                                                         // this channel currently :/.
3011                                                                                                 }
3012                                                                                         }
3013                                                                                 }
3014                                                                         }
3015                                                                 },
3016                                                                 HTLCForwardInfo::AddHTLC { .. } => {
3017                                                                         panic!("short_channel_id != 0 should imply any pending_forward entries are of type Forward");
3018                                                                 },
3019                                                                 HTLCForwardInfo::FailHTLC { htlc_id, err_packet } => {
3020                                                                         log_trace!(self.logger, "Failing HTLC back to channel with short id {} (backward HTLC ID {}) after delay", short_chan_id, htlc_id);
3021                                                                         match chan.get_mut().get_update_fail_htlc(htlc_id, err_packet, &self.logger) {
3022                                                                                 Err(e) => {
3023                                                                                         if let ChannelError::Ignore(msg) = e {
3024                                                                                                 log_trace!(self.logger, "Failed to fail HTLC with ID {} backwards to short_id {}: {}", htlc_id, short_chan_id, msg);
3025                                                                                         } else {
3026                                                                                                 panic!("Stated return value requirements in get_update_fail_htlc() were not met");
3027                                                                                         }
3028                                                                                         // fail-backs are best-effort, we probably already have one
3029                                                                                         // pending, and if not that's OK; in that case, the channel is on
3030                                                                                         // the chain and sending the HTLC-Timeout is their problem.
3031                                                                                         continue;
3032                                                                                 },
3033                                                                                 Ok(Some(msg)) => { fail_htlc_msgs.push(msg); },
3034                                                                                 Ok(None) => {
3035                                                                                         // Nothing to do here...we're waiting on a remote
3036                                                                                         // revoke_and_ack before we can update the commitment
3037                                                                                         // transaction. The Channel will automatically handle
3038                                                                                         // building the update_fail_htlc and commitment_signed
3039                                                                                         // messages when we can.
3040                                                                                         // We don't need any kind of timer here as they should fail
3041                                                                                         // the channel onto the chain if they can't get our
3042                                                                                         // update_fail_htlc in time; it's not our problem.
3043                                                                                 }
3044                                                                         }
3045                                                                 },
3046                                                         }
3047                                                 }
3048
3049                                                 if !add_htlc_msgs.is_empty() || !fail_htlc_msgs.is_empty() {
3050                                                         let (commitment_msg, monitor_update) = match chan.get_mut().send_commitment(&self.logger) {
3051                                                                 Ok(res) => res,
3052                                                                 Err(e) => {
3053                                                                         // We surely failed send_commitment due to bad keys; in that case,
3054                                                                         // close the channel and send an error message to our peer.
3055                                                                         let counterparty_node_id = chan.get().get_counterparty_node_id();
3056                                                                         let err: Result<(), _>  = match e {
3057                                                                                 ChannelError::Ignore(_) | ChannelError::Warn(_) => {
3058                                                                                         panic!("Stated return value requirements in send_commitment() were not met");
3059                                                                                 }
3060                                                                                 ChannelError::Close(msg) => {
3061                                                                                         log_trace!(self.logger, "Closing channel {} due to Close-required error: {}", log_bytes!(chan.key()[..]), msg);
3062                                                                                         let mut channel = remove_channel!(self, channel_state, chan);
3063                                                                                         // ChannelClosed event is generated by handle_error for us.
3064                                                                                         Err(MsgHandleErrInternal::from_finish_shutdown(msg, channel.channel_id(), channel.get_user_id(), channel.force_shutdown(true), self.get_channel_update_for_broadcast(&channel).ok()))
3065                                                                                 },
3066                                                                                 ChannelError::CloseDelayBroadcast(_) => { panic!("Wait is only generated on receipt of channel_reestablish, which is handled by try_chan_entry, we don't bother to support it here"); }
3067                                                                         };
3068                                                                         handle_errors.push((counterparty_node_id, err));
3069                                                                         continue;
3070                                                                 }
3071                                                         };
3072                                                         if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
3073                                                                 handle_errors.push((chan.get().get_counterparty_node_id(), handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, true)));
3074                                                                 continue;
3075                                                         }
3076                                                         log_debug!(self.logger, "Forwarding HTLCs resulted in a commitment update with {} HTLCs added and {} HTLCs failed for channel {}",
3077                                                                 add_htlc_msgs.len(), fail_htlc_msgs.len(), log_bytes!(chan.get().channel_id()));
3078                                                         channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
3079                                                                 node_id: chan.get().get_counterparty_node_id(),
3080                                                                 updates: msgs::CommitmentUpdate {
3081                                                                         update_add_htlcs: add_htlc_msgs,
3082                                                                         update_fulfill_htlcs: Vec::new(),
3083                                                                         update_fail_htlcs: fail_htlc_msgs,
3084                                                                         update_fail_malformed_htlcs: Vec::new(),
3085                                                                         update_fee: None,
3086                                                                         commitment_signed: commitment_msg,
3087                                                                 },
3088                                                         });
3089                                                 }
3090                                         } else {
3091                                                 unreachable!();
3092                                         }
3093                                 } else {
3094                                         for forward_info in pending_forwards.drain(..) {
3095                                                 match forward_info {
3096                                                         HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_htlc_id, forward_info: PendingHTLCInfo {
3097                                                                         routing, incoming_shared_secret, payment_hash, amt_to_forward, .. },
3098                                                                         prev_funding_outpoint } => {
3099                                                                 let (cltv_expiry, onion_payload, payment_data, phantom_shared_secret) = match routing {
3100                                                                         PendingHTLCRouting::Receive { payment_data, incoming_cltv_expiry, phantom_shared_secret } => {
3101                                                                                 let _legacy_hop_data = payment_data.clone();
3102                                                                                 (incoming_cltv_expiry, OnionPayload::Invoice { _legacy_hop_data }, Some(payment_data), phantom_shared_secret)
3103                                                                         },
3104                                                                         PendingHTLCRouting::ReceiveKeysend { payment_preimage, incoming_cltv_expiry } =>
3105                                                                                 (incoming_cltv_expiry, OnionPayload::Spontaneous(payment_preimage), None, None),
3106                                                                         _ => {
3107                                                                                 panic!("short_channel_id == 0 should imply any pending_forward entries are of type Receive");
3108                                                                         }
3109                                                                 };
3110                                                                 let claimable_htlc = ClaimableHTLC {
3111                                                                         prev_hop: HTLCPreviousHopData {
3112                                                                                 short_channel_id: prev_short_channel_id,
3113                                                                                 outpoint: prev_funding_outpoint,
3114                                                                                 htlc_id: prev_htlc_id,
3115                                                                                 incoming_packet_shared_secret: incoming_shared_secret,
3116                                                                                 phantom_shared_secret,
3117                                                                         },
3118                                                                         value: amt_to_forward,
3119                                                                         timer_ticks: 0,
3120                                                                         total_msat: if let Some(data) = &payment_data { data.total_msat } else { amt_to_forward },
3121                                                                         cltv_expiry,
3122                                                                         onion_payload,
3123                                                                 };
3124
3125                                                                 macro_rules! fail_htlc {
3126                                                                         ($htlc: expr) => {
3127                                                                                 let mut htlc_msat_height_data = byte_utils::be64_to_array($htlc.value).to_vec();
3128                                                                                 htlc_msat_height_data.extend_from_slice(
3129                                                                                         &byte_utils::be32_to_array(self.best_block.read().unwrap().height()),
3130                                                                                 );
3131                                                                                 failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData {
3132                                                                                                 short_channel_id: $htlc.prev_hop.short_channel_id,
3133                                                                                                 outpoint: prev_funding_outpoint,
3134                                                                                                 htlc_id: $htlc.prev_hop.htlc_id,
3135                                                                                                 incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret,
3136                                                                                                 phantom_shared_secret,
3137                                                                                         }), payment_hash,
3138                                                                                         HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data }
3139                                                                                 ));
3140                                                                         }
3141                                                                 }
3142
3143                                                                 macro_rules! check_total_value {
3144                                                                         ($payment_data: expr, $payment_preimage: expr) => {{
3145                                                                                 let mut payment_received_generated = false;
3146                                                                                 let htlcs = channel_state.claimable_htlcs.entry(payment_hash)
3147                                                                                         .or_insert(Vec::new());
3148                                                                                 if htlcs.len() == 1 {
3149                                                                                         if let OnionPayload::Spontaneous(_) = htlcs[0].onion_payload {
3150                                                                                                 log_trace!(self.logger, "Failing new HTLC with payment_hash {} as we already had an existing keysend HTLC with the same payment hash", log_bytes!(payment_hash.0));
3151                                                                                                 fail_htlc!(claimable_htlc);
3152                                                                                                 continue
3153                                                                                         }
3154                                                                                 }
3155                                                                                 let mut total_value = claimable_htlc.value;
3156                                                                                 for htlc in htlcs.iter() {
3157                                                                                         total_value += htlc.value;
3158                                                                                         match &htlc.onion_payload {
3159                                                                                                 OnionPayload::Invoice { .. } => {
3160                                                                                                         if htlc.total_msat != $payment_data.total_msat {
3161                                                                                                                 log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the HTLCs had inconsistent total values (eg {} and {})",
3162                                                                                                                         log_bytes!(payment_hash.0), $payment_data.total_msat, htlc.total_msat);
3163                                                                                                                 total_value = msgs::MAX_VALUE_MSAT;
3164                                                                                                         }
3165                                                                                                         if total_value >= msgs::MAX_VALUE_MSAT { break; }
3166                                                                                                 },
3167                                                                                                 _ => unreachable!(),
3168                                                                                         }
3169                                                                                 }
3170                                                                                 if total_value >= msgs::MAX_VALUE_MSAT || total_value > $payment_data.total_msat {
3171                                                                                         log_trace!(self.logger, "Failing HTLCs with payment_hash {} as the total value {} ran over expected value {} (or HTLCs were inconsistent)",
3172                                                                                                 log_bytes!(payment_hash.0), total_value, $payment_data.total_msat);
3173                                                                                         fail_htlc!(claimable_htlc);
3174                                                                                 } else if total_value == $payment_data.total_msat {
3175                                                                                         htlcs.push(claimable_htlc);
3176                                                                                         new_events.push(events::Event::PaymentReceived {
3177                                                                                                 payment_hash,
3178                                                                                                 purpose: events::PaymentPurpose::InvoicePayment {
3179                                                                                                         payment_preimage: $payment_preimage,
3180                                                                                                         payment_secret: $payment_data.payment_secret,
3181                                                                                                 },
3182                                                                                                 amt: total_value,
3183                                                                                         });
3184                                                                                         payment_received_generated = true;
3185                                                                                 } else {
3186                                                                                         // Nothing to do - we haven't reached the total
3187                                                                                         // payment value yet, wait until we receive more
3188                                                                                         // MPP parts.
3189                                                                                         htlcs.push(claimable_htlc);
3190                                                                                 }
3191                                                                                 payment_received_generated
3192                                                                         }}
3193                                                                 }
3194
3195                                                                 // Check that the payment hash and secret are known. Note that we
3196                                                                 // MUST take care to handle the "unknown payment hash" and
3197                                                                 // "incorrect payment secret" cases here identically or we'd expose
3198                                                                 // that we are the ultimate recipient of the given payment hash.
3199                                                                 // Further, we must not expose whether we have any other HTLCs
3200                                                                 // associated with the same payment_hash pending or not.
3201                                                                 let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
3202                                                                 match payment_secrets.entry(payment_hash) {
3203                                                                         hash_map::Entry::Vacant(_) => {
3204                                                                                 match claimable_htlc.onion_payload {
3205                                                                                         OnionPayload::Invoice { .. } => {
3206                                                                                                 let payment_data = payment_data.unwrap();
3207                                                                                                 let payment_preimage = match inbound_payment::verify(payment_hash, &payment_data, self.highest_seen_timestamp.load(Ordering::Acquire) as u64, &self.inbound_payment_key, &self.logger) {
3208                                                                                                         Ok(payment_preimage) => payment_preimage,
3209                                                                                                         Err(()) => {
3210                                                                                                                 fail_htlc!(claimable_htlc);
3211                                                                                                                 continue
3212                                                                                                         }
3213                                                                                                 };
3214                                                                                                 check_total_value!(payment_data, payment_preimage);
3215                                                                                         },
3216                                                                                         OnionPayload::Spontaneous(preimage) => {
3217                                                                                                 match channel_state.claimable_htlcs.entry(payment_hash) {
3218                                                                                                         hash_map::Entry::Vacant(e) => {
3219                                                                                                                 e.insert(vec![claimable_htlc]);
3220                                                                                                                 new_events.push(events::Event::PaymentReceived {
3221                                                                                                                         payment_hash,
3222                                                                                                                         amt: amt_to_forward,
3223                                                                                                                         purpose: events::PaymentPurpose::SpontaneousPayment(preimage),
3224                                                                                                                 });
3225                                                                                                         },
3226                                                                                                         hash_map::Entry::Occupied(_) => {
3227                                                                                                                 log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} for a duplicative payment hash", log_bytes!(payment_hash.0));
3228                                                                                                                 fail_htlc!(claimable_htlc);
3229                                                                                                         }
3230                                                                                                 }
3231                                                                                         }
3232                                                                                 }
3233                                                                         },
3234                                                                         hash_map::Entry::Occupied(inbound_payment) => {
3235                                                                                 if payment_data.is_none() {
3236                                                                                         log_trace!(self.logger, "Failing new keysend HTLC with payment_hash {} because we already have an inbound payment with the same payment hash", log_bytes!(payment_hash.0));
3237                                                                                         fail_htlc!(claimable_htlc);
3238                                                                                         continue
3239                                                                                 };
3240                                                                                 let payment_data = payment_data.unwrap();
3241                                                                                 if inbound_payment.get().payment_secret != payment_data.payment_secret {
3242                                                                                         log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our expected payment secret.", log_bytes!(payment_hash.0));
3243                                                                                         fail_htlc!(claimable_htlc);
3244                                                                                 } else if inbound_payment.get().min_value_msat.is_some() && payment_data.total_msat < inbound_payment.get().min_value_msat.unwrap() {
3245                                                                                         log_trace!(self.logger, "Failing new HTLC with payment_hash {} as it didn't match our minimum value (had {}, needed {}).",
3246                                                                                                 log_bytes!(payment_hash.0), payment_data.total_msat, inbound_payment.get().min_value_msat.unwrap());
3247                                                                                         fail_htlc!(claimable_htlc);
3248                                                                                 } else {
3249                                                                                         let payment_received_generated = check_total_value!(payment_data, inbound_payment.get().payment_preimage);
3250                                                                                         if payment_received_generated {
3251                                                                                                 inbound_payment.remove_entry();
3252                                                                                         }
3253                                                                                 }
3254                                                                         },
3255                                                                 };
3256                                                         },
3257                                                         HTLCForwardInfo::FailHTLC { .. } => {
3258                                                                 panic!("Got pending fail of our own HTLC");
3259                                                         }
3260                                                 }
3261                                         }
3262                                 }
3263                         }
3264                 }
3265
3266                 for (htlc_source, payment_hash, failure_reason) in failed_forwards.drain(..) {
3267                         self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source, &payment_hash, failure_reason);
3268                 }
3269                 self.forward_htlcs(&mut phantom_receives);
3270
3271                 for (counterparty_node_id, err) in handle_errors.drain(..) {
3272                         let _ = handle_error!(self, err, counterparty_node_id);
3273                 }
3274
3275                 if new_events.is_empty() { return }
3276                 let mut events = self.pending_events.lock().unwrap();
3277                 events.append(&mut new_events);
3278         }
3279
3280         /// Free the background events, generally called from timer_tick_occurred.
3281         ///
3282         /// Exposed for testing to allow us to process events quickly without generating accidental
3283         /// BroadcastChannelUpdate events in timer_tick_occurred.
3284         ///
3285         /// Expects the caller to have a total_consistency_lock read lock.
3286         fn process_background_events(&self) -> bool {
3287                 let mut background_events = Vec::new();
3288                 mem::swap(&mut *self.pending_background_events.lock().unwrap(), &mut background_events);
3289                 if background_events.is_empty() {
3290                         return false;
3291                 }
3292
3293                 for event in background_events.drain(..) {
3294                         match event {
3295                                 BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)) => {
3296                                         // The channel has already been closed, so there's no use worrying about whether the
3297                                         // monitor update completes.
3298                                         let _ = self.chain_monitor.update_channel(funding_txo, update);
3299                                 },
3300                         }
3301                 }
3302                 true
3303         }
3304
3305         #[cfg(any(test, feature = "_test_utils"))]
3306         /// Process background events, for functional testing
3307         pub fn test_process_background_events(&self) {
3308                 self.process_background_events();
3309         }
3310
3311         fn update_channel_fee(&self, short_to_id: &mut HashMap<u64, [u8; 32]>, pending_msg_events: &mut Vec<events::MessageSendEvent>, chan_id: &[u8; 32], chan: &mut Channel<Signer>, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) {
3312                 if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); }
3313                 // If the feerate has decreased by less than half, don't bother
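                // As a purely illustrative example (hypothetical values): with a current feerate of
                // 1000 sat per 1000 weight, a new estimate of 600 is skipped (600 * 2 > 1000), while
                // a new estimate of 400 results in an update_fee (400 * 2 <= 1000).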
3314                 if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() {
3315                         log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.",
3316                                 log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
3317                         return (true, NotifyOption::SkipPersist, Ok(()));
3318                 }
3319                 if !chan.is_live() {
3320                         log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).",
3321                                 log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
3322                         return (true, NotifyOption::SkipPersist, Ok(()));
3323                 }
3324                 log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.",
3325                         log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate);
3326
3327                 let mut retain_channel = true;
3328                 let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) {
3329                         Ok(res) => Ok(res),
3330                         Err(e) => {
3331                                 let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
3332                                 if drop { retain_channel = false; }
3333                                 Err(res)
3334                         }
3335                 };
3336                 let ret_err = match res {
3337                         Ok(Some((update_fee, commitment_signed, monitor_update))) => {
3338                                 if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
3339                                         let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, chan_id, COMMITMENT_UPDATE_ONLY);
3340                                         if drop { retain_channel = false; }
3341                                         res
3342                                 } else {
3343                                         pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
3344                                                 node_id: chan.get_counterparty_node_id(),
3345                                                 updates: msgs::CommitmentUpdate {
3346                                                         update_add_htlcs: Vec::new(),
3347                                                         update_fulfill_htlcs: Vec::new(),
3348                                                         update_fail_htlcs: Vec::new(),
3349                                                         update_fail_malformed_htlcs: Vec::new(),
3350                                                         update_fee: Some(update_fee),
3351                                                         commitment_signed,
3352                                                 },
3353                                         });
3354                                         Ok(())
3355                                 }
3356                         },
3357                         Ok(None) => Ok(()),
3358                         Err(e) => Err(e),
3359                 };
3360                 (retain_channel, NotifyOption::DoPersist, ret_err)
3361         }
3362
3363         #[cfg(fuzzing)]
3364         /// In chanmon_consistency we sometimes want to perform the channel fee updates normally done in
3365         /// timer_tick_occurred, but we can't generate the disabled-channel updates there, as the fuzzer
3366         /// considers them a fuzz failure (they usually indicate a channel force-close, which is exactly
3367         /// what it wants to detect). Thus, we expose this variant here for its benefit.
3368         pub fn maybe_update_chan_fees(&self) {
3369                 PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
3370                         let mut should_persist = NotifyOption::SkipPersist;
3371
3372                         let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
3373
3374                         let mut handle_errors = Vec::new();
3375                         {
3376                                 let mut channel_state_lock = self.channel_state.lock().unwrap();
3377                                 let channel_state = &mut *channel_state_lock;
3378                                 let pending_msg_events = &mut channel_state.pending_msg_events;
3379                                 let short_to_id = &mut channel_state.short_to_id;
3380                                 channel_state.by_id.retain(|chan_id, chan| {
3381                                         let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
3382                                         if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
3383                                         if err.is_err() {
3384                                                 handle_errors.push(err);
3385                                         }
3386                                         retain_channel
3387                                 });
3388                         }
3389
3390                         should_persist
3391                 });
3392         }
3393
3394         /// Performs actions which should happen on startup and roughly once per minute thereafter.
3395         ///
3396         /// This currently includes:
3397         ///  * Updating our outbound channels' feerates to match the current on-chain fee estimate,
3398         ///  * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more
3399         ///    than a minute, informing the network that they should no longer attempt to route over
3400         ///    the channel.
3401         ///
3402         /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
3403         /// estimate fetches.
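        ///
        /// A minimal usage sketch (illustrative only; assumes your application already holds a
        /// `channel_manager` and drives it from a dedicated once-per-minute timer thread):
        ///
        /// ```ignore
        /// loop {
        ///     std::thread::sleep(std::time::Duration::from_secs(60));
        ///     channel_manager.timer_tick_occurred();
        /// }
        /// ```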
3404         pub fn timer_tick_occurred(&self) {
3405                 PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
3406                         let mut should_persist = NotifyOption::SkipPersist;
3407                         if self.process_background_events() { should_persist = NotifyOption::DoPersist; }
3408
3409                         let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal);
3410
3411                         let mut handle_errors = Vec::new();
3412                         let mut timed_out_mpp_htlcs = Vec::new();
3413                         {
3414                                 let mut channel_state_lock = self.channel_state.lock().unwrap();
3415                                 let channel_state = &mut *channel_state_lock;
3416                                 let pending_msg_events = &mut channel_state.pending_msg_events;
3417                                 let short_to_id = &mut channel_state.short_to_id;
3418                                 channel_state.by_id.retain(|chan_id, chan| {
3419                                         let counterparty_node_id = chan.get_counterparty_node_id();
3420                                         let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate);
3421                                         if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; }
3422                                         if err.is_err() {
3423                                                 handle_errors.push((err, counterparty_node_id));
3424                                         }
3425                                         if !retain_channel { return false; }
3426
3427                                         if let Err(e) = chan.timer_check_closing_negotiation_progress() {
3428                                                 let (needs_close, err) = convert_chan_err!(self, e, short_to_id, chan, chan_id);
3429                                                 handle_errors.push((Err(err), chan.get_counterparty_node_id()));
3430                                                 if needs_close { return false; }
3431                                         }
3432
3433                                         match chan.channel_update_status() {
3434                                                 ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged),
3435                                                 ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged),
3436                                                 ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled),
3437                                                 ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled),
3438                                                 ChannelUpdateStatus::DisabledStaged if !chan.is_live() => {
3439                                                         if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
3440                                                                 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
3441                                                                         msg: update
3442                                                                 });
3443                                                         }
3444                                                         should_persist = NotifyOption::DoPersist;
3445                                                         chan.set_channel_update_status(ChannelUpdateStatus::Disabled);
3446                                                 },
3447                                                 ChannelUpdateStatus::EnabledStaged if chan.is_live() => {
3448                                                         if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
3449                                                                 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
3450                                                                         msg: update
3451                                                                 });
3452                                                         }
3453                                                         should_persist = NotifyOption::DoPersist;
3454                                                         chan.set_channel_update_status(ChannelUpdateStatus::Enabled);
3455                                                 },
3456                                                 _ => {},
3457                                         }
3458
3459                                         true
3460                                 });
3461
3462                                 channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
3463                                         if htlcs.is_empty() {
3464                                                 // This should be unreachable
3465                                                 debug_assert!(false);
3466                                                 return false;
3467                                         }
3468                                         if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
3469                                                 // Check if we've received all the parts we need for an MPP (the value of the parts adds up to total_msat).
3470                                                 // If so, we won't handle timing out any of its parts here.
3471                                                 if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
3472                                                         return true;
3473                                                 } else if htlcs.into_iter().any(|htlc| {
3474                                                         htlc.timer_ticks += 1;
3475                                                         return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
3476                                                 }) {
3477                                                         timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
3478                                                         return false;
3479                                                 }
3480                                         }
3481                                         true
3482                                 });
3483                         }
3484
3485                         for htlc_source in timed_out_mpp_htlcs.drain(..) {
3486                                 self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), HTLCSource::PreviousHopData(htlc_source.0), &htlc_source.1, HTLCFailReason::Reason { failure_code: 23, data: Vec::new() });
3487                         }
3488
3489                         for (err, counterparty_node_id) in handle_errors.drain(..) {
3490                                 let _ = handle_error!(self, err, counterparty_node_id);
3491                         }
3492                         should_persist
3493                 });
3494         }
3495
3496         /// Indicates that the preimage for payment_hash is unknown or the received amount is incorrect
3497         /// after a PaymentReceived event, failing the HTLC back to its origin and freeing resources
3498         /// along the path (including in our own channel on which we received it).
3499         /// Returns false if no payment was found to fail backwards, true if the process of failing the
3500         /// HTLC backwards has been started.
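        ///
        /// A minimal sketch of typical usage (illustrative only; `channel_manager`, plus
        /// `payment_hash` and `amt` taken from a `PaymentReceived` event, and an
        /// application-defined `expected_amt_msat`, are all assumed to exist):
        ///
        /// ```ignore
        /// if amt < expected_amt_msat {
        ///     channel_manager.fail_htlc_backwards(&payment_hash);
        /// }
        /// ```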
3501         pub fn fail_htlc_backwards(&self, payment_hash: &PaymentHash) -> bool {
3502                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
3503
3504                 let mut channel_state = Some(self.channel_state.lock().unwrap());
3505                 let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(payment_hash);
3506                 if let Some(mut sources) = removed_source {
3507                         for htlc in sources.drain(..) {
3508                                 if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
3509                                 let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
3510                                 htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
3511                                                 self.best_block.read().unwrap().height()));
3512                                 self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
3513                                                 HTLCSource::PreviousHopData(htlc.prev_hop), payment_hash,
3514                                                 HTLCFailReason::Reason { failure_code: 0x4000 | 15, data: htlc_msat_height_data });
3515                         }
3516                         true
3517                 } else { false }
3518         }
3519
3520         /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
3521         /// that we want to return and a channel.
3522         ///
3523         /// This is for failures on the channel on which the HTLC was *received*, not for failures
3524         /// which occur while forwarding it onward.
3525         fn get_htlc_inbound_temp_fail_err_and_data(&self, desired_err_code: u16, chan: &Channel<Signer>) -> (u16, Vec<u8>) {
3526                 // We can't be sure what SCID was used when relaying inbound towards us, so we have to
3527                 // guess somewhat. If it's a public channel, we figure best to just use the real SCID (as
3528                 // we're not leaking that we have a channel with the counterparty), otherwise we try to use
3529                 // an inbound SCID alias before the real SCID.
3530                 let scid_pref = if chan.should_announce() {
3531                         chan.get_short_channel_id().or(chan.latest_inbound_scid_alias())
3532                 } else {
3533                         chan.latest_inbound_scid_alias().or(chan.get_short_channel_id())
3534                 };
3535                 if let Some(scid) = scid_pref {
3536                         self.get_htlc_temp_fail_err_and_data(desired_err_code, scid, chan)
3537                 } else {
3538                         (0x4000|10, Vec::new())
3539                 }
3540         }
3541
3542
3543         /// Gets an HTLC onion failure code and error data for an `UPDATE` error, given the error code
3544         /// that we want to return and a channel.
3545         fn get_htlc_temp_fail_err_and_data(&self, desired_err_code: u16, scid: u64, chan: &Channel<Signer>) -> (u16, Vec<u8>) {
3546                 debug_assert_eq!(desired_err_code & 0x1000, 0x1000);
3547                 if let Ok(upd) = self.get_channel_update_for_onion(scid, chan) {
3548                         let mut enc = VecWriter(Vec::with_capacity(upd.serialized_length() + 6));
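                        // The UPDATE failure data written below is, in order: for channel_disabled
                        // (UPDATE|20) a 2-byte field written as zero (see the TODO below), then a 2-byte
                        // length covering the message type and channel_update, then the 2-byte
                        // channel_update message type, then the channel_update itself.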
3549                         if desired_err_code == 0x1000 | 20 {
3550                                 // TODO: underspecified, follow https://github.com/lightning/bolts/issues/791
3551                                 0u16.write(&mut enc).expect("Writes cannot fail");
3552                         }
3553                         (upd.serialized_length() as u16 + 2).write(&mut enc).expect("Writes cannot fail");
3554                         msgs::ChannelUpdate::TYPE.write(&mut enc).expect("Writes cannot fail");
3555                         upd.write(&mut enc).expect("Writes cannot fail");
3556                         (desired_err_code, enc.0)
3557                 } else {
3558                         // If we fail to get a unicast channel_update, it implies we don't yet have an SCID,
3559                         // which means we really shouldn't have gotten a payment to be forwarded over this
3560                         // channel yet, or if we did it's from a route hint. Either way, returning an error of
3561                         // PERM|no_such_channel should be fine.
3562                         (0x4000|10, Vec::new())
3563                 }
3564         }
3565
3566         // Fail a list of HTLCs that were just freed from the holding cell. The HTLCs need to be
3567         // failed backwards or, if they were one of our outgoing HTLCs, then their failure needs to
3568         // be surfaced to the user.
3569         fn fail_holding_cell_htlcs(&self, mut htlcs_to_fail: Vec<(HTLCSource, PaymentHash)>, channel_id: [u8; 32]) {
3570                 for (htlc_src, payment_hash) in htlcs_to_fail.drain(..) {
3571                         match htlc_src {
3572                                 HTLCSource::PreviousHopData(HTLCPreviousHopData { .. }) => {
3573                                         let (failure_code, onion_failure_data) =
3574                                                 match self.channel_state.lock().unwrap().by_id.entry(channel_id) {
3575                                                         hash_map::Entry::Occupied(chan_entry) => {
3576                                                                 self.get_htlc_inbound_temp_fail_err_and_data(0x1000|7, &chan_entry.get())
3577                                                         },
3578                                                         hash_map::Entry::Vacant(_) => (0x4000|10, Vec::new())
3579                                                 };
3580                                         let channel_state = self.channel_state.lock().unwrap();
3581                                         self.fail_htlc_backwards_internal(channel_state,
3582                                                 htlc_src, &payment_hash, HTLCFailReason::Reason { failure_code, data: onion_failure_data});
3583                                 },
3584                                 HTLCSource::OutboundRoute { session_priv, payment_id, path, payment_params, .. } => {
3585                                         let mut session_priv_bytes = [0; 32];
3586                                         session_priv_bytes.copy_from_slice(&session_priv[..]);
3587                                         let mut outbounds = self.pending_outbound_payments.lock().unwrap();
3588                                         if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
3589                                                 if payment.get_mut().remove(&session_priv_bytes, Some(&path)) && !payment.get().is_fulfilled() {
3590                                                         let retry = if let Some(payment_params_data) = payment_params {
3591                                                                 let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
3592                                                                 Some(RouteParameters {
3593                                                                         payment_params: payment_params_data,
3594                                                                         final_value_msat: path_last_hop.fee_msat,
3595                                                                         final_cltv_expiry_delta: path_last_hop.cltv_expiry_delta,
3596                                                                 })
3597                                                         } else { None };
3598                                                         let mut pending_events = self.pending_events.lock().unwrap();
3599                                                         pending_events.push(events::Event::PaymentPathFailed {
3600                                                                 payment_id: Some(payment_id),
3601                                                                 payment_hash,
3602                                                                 rejected_by_dest: false,
3603                                                                 network_update: None,
3604                                                                 all_paths_failed: payment.get().remaining_parts() == 0,
3605                                                                 path: path.clone(),
3606                                                                 short_channel_id: None,
3607                                                                 retry,
3608                                                                 #[cfg(test)]
3609                                                                 error_code: None,
3610                                                                 #[cfg(test)]
3611                                                                 error_data: None,
3612                                                         });
3613                                                         if payment.get().abandoned() && payment.get().remaining_parts() == 0 {
3614                                                                 pending_events.push(events::Event::PaymentFailed {
3615                                                                         payment_id,
3616                                                                         payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"),
3617                                                                 });
3618                                                                 payment.remove();
3619                                                         }
3620                                                 }
3621                                         } else {
3622                                                 log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
3623                                         }
3624                                 },
3625                         };
3626                 }
3627         }
3628
3629         /// Fails an HTLC backwards to the node which sent it to us.
3630         /// Note that while we take a channel_state lock as input, we do *not* assume consistency here.
3631         /// There are several callsites that do stupid things like loop over a list of payment_hashes
3632         /// to fail and take the channel_state lock for each iteration (as we take ownership and may
3633         /// drop it). In other words, no assumptions are made that entries in claimable_htlcs point to
3634         /// still-available channels.
3635         fn fail_htlc_backwards_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_hash: &PaymentHash, onion_error: HTLCFailReason) {
3636                 //TODO: There is a timing attack here where if a node fails an HTLC back to us they can
3637                 //identify whether we sent it or not based on the (I presume) very different runtime
3638                 //between the branches here. We should make this async and move it into the forward HTLCs
3639                 //timer handling.
3640
3641                 // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
3642                 // from block_connected which may run during initialization prior to the chain_monitor
3643                 // being fully configured. See the docs for `ChannelManagerReadArgs` for more.
3644                 match source {
3645                         HTLCSource::OutboundRoute { ref path, session_priv, payment_id, ref payment_params, .. } => {
3646                                 let mut session_priv_bytes = [0; 32];
3647                                 session_priv_bytes.copy_from_slice(&session_priv[..]);
3648                                 let mut outbounds = self.pending_outbound_payments.lock().unwrap();
3649                                 let mut all_paths_failed = false;
3650                                 let mut full_failure_ev = None;
3651                                 if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
3652                                         if !payment.get_mut().remove(&session_priv_bytes, Some(&path)) {
3653                                                 log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
3654                                                 return;
3655                                         }
3656                                         if payment.get().is_fulfilled() {
3657                                                 log_trace!(self.logger, "Received failure of HTLC with payment_hash {} after payment completion", log_bytes!(payment_hash.0));
3658                                                 return;
3659                                         }
3660                                         if payment.get().remaining_parts() == 0 {
3661                                                 all_paths_failed = true;
3662                                                 if payment.get().abandoned() {
3663                                                         full_failure_ev = Some(events::Event::PaymentFailed {
3664                                                                 payment_id,
3665                                                                 payment_hash: payment.get().payment_hash().expect("PendingOutboundPayments::RetriesExceeded always has a payment hash set"),
3666                                                         });
3667                                                         payment.remove();
3668                                                 }
3669                                         }
3670                                 } else {
3671                                         log_trace!(self.logger, "Received duplicative fail for HTLC with payment_hash {}", log_bytes!(payment_hash.0));
3672                                         return;
3673                                 }
3674                                 mem::drop(channel_state_lock);
3675                                 let retry = if let Some(payment_params_data) = payment_params {
3676                                         let path_last_hop = path.last().expect("Outbound payments must have had a valid path");
3677                                         Some(RouteParameters {
3678                                                 payment_params: payment_params_data.clone(),
3679                                                 final_value_msat: path_last_hop.fee_msat,
3680                                                 final_cltv_expiry_delta: path_last_hop.cltv_expiry_delta,
3681                                         })
3682                                 } else { None };
3683                                 log_trace!(self.logger, "Failing outbound payment HTLC with payment_hash {}", log_bytes!(payment_hash.0));
3684
3685                                 let path_failure = match &onion_error {
3686                                         &HTLCFailReason::LightningError { ref err } => {
3687                                                 #[cfg(test)]
3688                                                 let (network_update, short_channel_id, payment_retryable, onion_error_code, onion_error_data) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
3689                                                 #[cfg(not(test))]
3690                                                 let (network_update, short_channel_id, payment_retryable, _, _) = onion_utils::process_onion_failure(&self.secp_ctx, &self.logger, &source, err.data.clone());
3691                                                 // TODO: If we decided to blame ourselves (or one of our channels) in
3692                                                 // process_onion_failure we should close that channel as it implies our
3693                                                 // next-hop is needlessly blaming us!
3694                                                 events::Event::PaymentPathFailed {
3695                                                         payment_id: Some(payment_id),
3696                                                         payment_hash: payment_hash.clone(),
3697                                                         rejected_by_dest: !payment_retryable,
3698                                                         network_update,
3699                                                         all_paths_failed,
3700                                                         path: path.clone(),
3701                                                         short_channel_id,
3702                                                         retry,
3703                                                         #[cfg(test)]
3704                                                         error_code: onion_error_code,
3705                                                         #[cfg(test)]
3706                                                         error_data: onion_error_data
3707                                                 }
3708                                         },
3709                                         &HTLCFailReason::Reason {
3710                                                         #[cfg(test)]
3711                                                         ref failure_code,
3712                                                         #[cfg(test)]
3713                                                         ref data,
3714                                                         .. } => {
3715                                                 // This can happen if, e.g., we get a fail_malformed_htlc from the first hop.
3716                                                 // TODO: We'd like to generate a NetworkUpdate for temporary
3717                                                 // failures here, but that would be insufficient as get_route
3718                                                 // generally ignores its view of our own channels as we provide them via
3719                                                 // ChannelDetails.
3720                                                 // TODO: For non-temporary failures, we really should be closing the
3721                                                 // channel here as we apparently can't relay through them anyway.
3722                                                 events::Event::PaymentPathFailed {
3723                                                         payment_id: Some(payment_id),
3724                                                         payment_hash: payment_hash.clone(),
3725                                                         rejected_by_dest: path.len() == 1,
3726                                                         network_update: None,
3727                                                         all_paths_failed,
3728                                                         path: path.clone(),
3729                                                         short_channel_id: Some(path.first().unwrap().short_channel_id),
3730                                                         retry,
3731                                                         #[cfg(test)]
3732                                                         error_code: Some(*failure_code),
3733                                                         #[cfg(test)]
3734                                                         error_data: Some(data.clone()),
3735                                                 }
3736                                         }
3737                                 };
3738                                 let mut pending_events = self.pending_events.lock().unwrap();
3739                                 pending_events.push(path_failure);
3740                                 if let Some(ev) = full_failure_ev { pending_events.push(ev); }
3741                         },
3742                         HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id, htlc_id, incoming_packet_shared_secret, phantom_shared_secret, .. }) => {
3743                                 let err_packet = match onion_error {
3744                                         HTLCFailReason::Reason { failure_code, data } => {
3745                                                 log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards from us with code {}", log_bytes!(payment_hash.0), failure_code);
3746                                                 if let Some(phantom_ss) = phantom_shared_secret {
3747                                                         let phantom_packet = onion_utils::build_failure_packet(&phantom_ss, failure_code, &data[..]).encode();
3748                                                         let encrypted_phantom_packet = onion_utils::encrypt_failure_packet(&phantom_ss, &phantom_packet);
3749                                                         onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &encrypted_phantom_packet.data[..])
3750                                                 } else {
3751                                                         let packet = onion_utils::build_failure_packet(&incoming_packet_shared_secret, failure_code, &data[..]).encode();
3752                                                         onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &packet)
3753                                                 }
3754                                         },
3755                                         HTLCFailReason::LightningError { err } => {
3756                                                 log_trace!(self.logger, "Failing HTLC with payment_hash {} backwards with pre-built LightningError", log_bytes!(payment_hash.0));
3757                                                 onion_utils::encrypt_failure_packet(&incoming_packet_shared_secret, &err.data)
3758                                         }
3759                                 };
3760
3761                                 let mut forward_event = None;
3762                                 if channel_state_lock.forward_htlcs.is_empty() {
3763                                         forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS));
3764                                 }
3765                                 match channel_state_lock.forward_htlcs.entry(short_channel_id) {
3766                                         hash_map::Entry::Occupied(mut entry) => {
3767                                                 entry.get_mut().push(HTLCForwardInfo::FailHTLC { htlc_id, err_packet });
3768                                         },
3769                                         hash_map::Entry::Vacant(entry) => {
3770                                                 entry.insert(vec!(HTLCForwardInfo::FailHTLC { htlc_id, err_packet }));
3771                                         }
3772                                 }
3773                                 mem::drop(channel_state_lock);
3774                                 if let Some(time) = forward_event {
3775                                         let mut pending_events = self.pending_events.lock().unwrap();
3776                                         pending_events.push(events::Event::PendingHTLCsForwardable {
3777                                                 time_forwardable: time
3778                                         });
3779                                 }
3780                         },
3781                 }
3782         }
3783
3784         /// Provides a payment preimage in response to [`Event::PaymentReceived`], generating any
3785         /// [`MessageSendEvent`]s needed to claim the payment.
3786         ///
3787         /// Note that if you did not set an `amount_msat` when calling [`create_inbound_payment`] or
3788         /// [`create_inbound_payment_for_hash`] you must check that the amount in the `PaymentReceived`
3789         /// event matches your expectation. If you fail to do so and call this method, you may provide
3790         /// the sender "proof-of-payment" when they did not fulfill the full expected payment.
3791         ///
3792         /// Returns whether any HTLCs were claimed, and thus if any new [`MessageSendEvent`]s are now
3793         /// pending for processing via [`get_and_clear_pending_msg_events`].
3794         ///
3795         /// [`Event::PaymentReceived`]: crate::util::events::Event::PaymentReceived
3796         /// [`create_inbound_payment`]: Self::create_inbound_payment
3797         /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
3798         /// [`get_and_clear_pending_msg_events`]: MessageSendEventsProvider::get_and_clear_pending_msg_events
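        /// A minimal sketch of typical usage from an event handler; the `channel_manager` and
        /// `payment_preimage` bindings below are illustrative assumptions, not values this method
        /// provides:
        ///
        /// ```ignore
        /// // Upon Event::PaymentReceived, after verifying that the received amount matches what
        /// // was expected and recovering the preimage for this payment_hash:
        /// if channel_manager.claim_funds(payment_preimage) {
        ///     // At least one HTLC was claimed; new MessageSendEvents are now pending.
        /// } else {
        ///     // Nothing was claimable, e.g. the HTLC(s) already timed out or were never received.
        /// }
        /// ```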
3799         pub fn claim_funds(&self, payment_preimage: PaymentPreimage) -> bool {
3800                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
3801
3802                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
3803
3804                 let mut channel_state = Some(self.channel_state.lock().unwrap());
3805                 let removed_source = channel_state.as_mut().unwrap().claimable_htlcs.remove(&payment_hash);
3806                 if let Some(mut sources) = removed_source {
3807                         assert!(!sources.is_empty());
3808
3809                         // If we are claiming an MPP payment, we have to take special care to ensure that each
3810                         // channel exists before claiming all of the payments (inside one lock).
3811                        // Note that channel existence is sufficient as we should always get a monitor update
3812                         // which will take care of the real HTLC claim enforcement.
3813                         //
3814                         // If we find an HTLC which we would need to claim but for which we do not have a
3815                         // channel, we will fail all parts of the MPP payment. While we could wait and see if
3816                         // the sender retries the already-failed path(s), it should be a pretty rare case where
3817                         // we got all the HTLCs and then a channel closed while we were waiting for the user to
3818                         // provide the preimage, so worrying too much about the optimal handling isn't worth
3819                         // it.
3820                         let mut valid_mpp = true;
3821                         for htlc in sources.iter() {
3822                                 if channel_state.as_ref().unwrap().short_to_id.get(&htlc.prev_hop.short_channel_id).is_none() {
3823                                         valid_mpp = false;
3824                                         break;
3825                                 }
3826                         }
3827
3828                         let mut errs = Vec::new();
3829                         let mut claimed_any_htlcs = false;
3830                         for htlc in sources.drain(..) {
3831                                 if !valid_mpp {
3832                                         if channel_state.is_none() { channel_state = Some(self.channel_state.lock().unwrap()); }
3833                                         let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
3834                                         htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(
3835                                                         self.best_block.read().unwrap().height()));
3836                                         self.fail_htlc_backwards_internal(channel_state.take().unwrap(),
3837                                                                          HTLCSource::PreviousHopData(htlc.prev_hop), &payment_hash,
3838                                                                          HTLCFailReason::Reason { failure_code: 0x4000|15, data: htlc_msat_height_data });
3839                                 } else {
3840                                         match self.claim_funds_from_hop(channel_state.as_mut().unwrap(), htlc.prev_hop, payment_preimage) {
3841                                                 ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) => {
3842                                                         if let msgs::ErrorAction::IgnoreError = err.err.action {
3843                                                                 // We got a temporary failure updating monitor, but will claim the
3844                                                                 // HTLC when the monitor updating is restored (or on chain).
3845                                                                 log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err);
3846                                                                 claimed_any_htlcs = true;
3847                                                         } else { errs.push((pk, err)); }
3848                                                 },
3849                                                 ClaimFundsFromHop::PrevHopForceClosed => unreachable!("We already checked for channel existence, we can't fail here!"),
3850                                                 ClaimFundsFromHop::DuplicateClaim => {
3851                                                         // We shouldn't get here in most cases, but if we do it likely
3852                                                         // indicates that the HTLC was timed out some time ago and is no longer
3853                                                         // available to be claimed. Thus, it does not make sense to set
3854                                                         // `claimed_any_htlcs`.
3855                                                 },
3856                                                 ClaimFundsFromHop::Success(_) => claimed_any_htlcs = true,
3857                                         }
3858                                 }
3859                         }
3860
3861                         // Now that we've done the entire above loop in one lock, we can handle any errors
3862                         // which were generated.
3863                         channel_state.take();
3864
3865                         for (counterparty_node_id, err) in errs.drain(..) {
3866                                 let res: Result<(), _> = Err(err);
3867                                 let _ = handle_error!(self, res, counterparty_node_id);
3868                         }
3869
3870                         claimed_any_htlcs
3871                 } else { false }
3872         }
3873
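        /// Attempts to claim a single HTLC on the channel identified by `prev_hop` using the given
        /// payment preimage, applying the resulting `ChannelMonitorUpdate` and pushing the
        /// `update_fulfill_htlc`/`commitment_signed` messages (if any) for the counterparty.
        /// Returns a `ClaimFundsFromHop` describing the outcome.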
3874         fn claim_funds_from_hop(&self, channel_state_lock: &mut MutexGuard<ChannelHolder<Signer>>, prev_hop: HTLCPreviousHopData, payment_preimage: PaymentPreimage) -> ClaimFundsFromHop {
3875                 //TODO: Delay the claimed_funds relaying just as we do for outbound relays!
3876                 let channel_state = &mut **channel_state_lock;
3877                 let chan_id = match channel_state.short_to_id.get(&prev_hop.short_channel_id) {
3878                         Some(chan_id) => chan_id.clone(),
3879                         None => {
3880                                 return ClaimFundsFromHop::PrevHopForceClosed
3881                         }
3882                 };
3883
3884                 if let hash_map::Entry::Occupied(mut chan) = channel_state.by_id.entry(chan_id) {
3885                         match chan.get_mut().get_update_fulfill_htlc_and_commit(prev_hop.htlc_id, payment_preimage, &self.logger) {
3886                                 Ok(msgs_monitor_option) => {
3887                                         if let UpdateFulfillCommitFetch::NewClaim { msgs, htlc_value_msat, monitor_update } = msgs_monitor_option {
3888                                                 if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
3889                                                         log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Debug },
3890                                                                 "Failed to update channel monitor with preimage {:?}: {:?}",
3891                                                                 payment_preimage, e);
3892                                                         return ClaimFundsFromHop::MonitorUpdateFail(
3893                                                                 chan.get().get_counterparty_node_id(),
3894                                                                 handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::CommitmentFirst, false, msgs.is_some()).unwrap_err(),
3895                                                                 Some(htlc_value_msat)
3896                                                         );
3897                                                 }
3898                                                 if let Some((msg, commitment_signed)) = msgs {
3899                                                         log_debug!(self.logger, "Claiming funds for HTLC with preimage {} resulted in a commitment_signed for channel {}",
3900                                                                 log_bytes!(payment_preimage.0), log_bytes!(chan.get().channel_id()));
3901                                                         channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
3902                                                                 node_id: chan.get().get_counterparty_node_id(),
3903                                                                 updates: msgs::CommitmentUpdate {
3904                                                                         update_add_htlcs: Vec::new(),
3905                                                                         update_fulfill_htlcs: vec![msg],
3906                                                                         update_fail_htlcs: Vec::new(),
3907                                                                         update_fail_malformed_htlcs: Vec::new(),
3908                                                                         update_fee: None,
3909                                                                         commitment_signed,
3910                                                                 }
3911                                                         });
3912                                                 }
3913                                                 return ClaimFundsFromHop::Success(htlc_value_msat);
3914                                         } else {
3915                                                 return ClaimFundsFromHop::DuplicateClaim;
3916                                         }
3917                                 },
3918                                 Err((e, monitor_update)) => {
3919                                         if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
3920                                                 log_given_level!(self.logger, if e == ChannelMonitorUpdateErr::PermanentFailure { Level::Error } else { Level::Info },
3921                                                         "Failed to update channel monitor with preimage {:?} immediately prior to force-close: {:?}",
3922                                                         payment_preimage, e);
3923                                         }
3924                                         let counterparty_node_id = chan.get().get_counterparty_node_id();
3925                                         let (drop, res) = convert_chan_err!(self, e, channel_state.short_to_id, chan.get_mut(), &chan_id);
3926                                         if drop {
3927                                                 chan.remove_entry();
3928                                         }
3929                                         return ClaimFundsFromHop::MonitorUpdateFail(counterparty_node_id, res, None);
3930                                 },
3931                         }
3932                 } else { unreachable!(); }
3933         }
3934
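        /// Marks the given outbound HTLC paths as irrevocably resolved once their claims have been
        /// finalized, generating `PaymentPathSuccessful` events and removing any pending outbound
        /// payments which have no remaining parts.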
3935         fn finalize_claims(&self, mut sources: Vec<HTLCSource>) {
3936                 let mut outbounds = self.pending_outbound_payments.lock().unwrap();
3937                 let mut pending_events = self.pending_events.lock().unwrap();
3938                 for source in sources.drain(..) {
3939                         if let HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } = source {
3940                                 let mut session_priv_bytes = [0; 32];
3941                                 session_priv_bytes.copy_from_slice(&session_priv[..]);
3942                                 if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
3943                                         assert!(payment.get().is_fulfilled());
3944                                         if payment.get_mut().remove(&session_priv_bytes, None) {
3945                                                 pending_events.push(
3946                                                         events::Event::PaymentPathSuccessful {
3947                                                                 payment_id,
3948                                                                 payment_hash: payment.get().payment_hash(),
3949                                                                 path,
3950                                                         }
3951                                                 );
3952                                         }
3953                                         if payment.get().remaining_parts() == 0 {
3954                                                 payment.remove();
3955                                         }
3956                                 }
3957                         }
3958                 }
3959         }
3960
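        /// Handles the resolution of a claimed HTLC: if we were the original sender, generates
        /// `PaymentSent`/`PaymentPathSuccessful` events; if we were forwarding, claims the HTLC on
        /// the previous hop (updating that channel's monitor even if the channel has been closed)
        /// and generates a `PaymentForwarded` event.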
3961         fn claim_funds_internal(&self, mut channel_state_lock: MutexGuard<ChannelHolder<Signer>>, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
3962                 match source {
3963                         HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
3964                                 mem::drop(channel_state_lock);
3965                                 let mut session_priv_bytes = [0; 32];
3966                                 session_priv_bytes.copy_from_slice(&session_priv[..]);
3967                                 let mut outbounds = self.pending_outbound_payments.lock().unwrap();
3968                                 if let hash_map::Entry::Occupied(mut payment) = outbounds.entry(payment_id) {
3969                                         let mut pending_events = self.pending_events.lock().unwrap();
3970                                         if !payment.get().is_fulfilled() {
3971                                                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
3972                                                 let fee_paid_msat = payment.get().get_pending_fee_msat();
3973                                                 pending_events.push(
3974                                                         events::Event::PaymentSent {
3975                                                                 payment_id: Some(payment_id),
3976                                                                 payment_preimage,
3977                                                                 payment_hash,
3978                                                                 fee_paid_msat,
3979                                                         }
3980                                                 );
3981                                                 payment.get_mut().mark_fulfilled();
3982                                         }
3983
3984                                         if from_onchain {
3985                                                 // We currently immediately remove HTLCs which were fulfilled on-chain.
3986                                                 // This could potentially lead to removing a pending payment too early,
3987                                                 // with a reorg of one block causing us to re-add the fulfilled payment on
3988                                                 // restart.
3989                                                 // TODO: We should have a second monitor event that informs us of payments
3990                                                 // irrevocably fulfilled.
3991                                                 if payment.get_mut().remove(&session_priv_bytes, Some(&path)) {
3992                                                         let payment_hash = Some(PaymentHash(Sha256::hash(&payment_preimage.0).into_inner()));
3993                                                         pending_events.push(
3994                                                                 events::Event::PaymentPathSuccessful {
3995                                                                         payment_id,
3996                                                                         payment_hash,
3997                                                                         path,
3998                                                                 }
3999                                                         );
4000                                                 }
4001
4002                                                 if payment.get().remaining_parts() == 0 {
4003                                                         payment.remove();
4004                                                 }
4005                                         }
4006                                 } else {
4007                                         log_trace!(self.logger, "Received duplicative fulfill for HTLC with payment_preimage {}", log_bytes!(payment_preimage.0));
4008                                 }
4009                         },
4010                         HTLCSource::PreviousHopData(hop_data) => {
4011                                 let prev_outpoint = hop_data.outpoint;
4012                                 let res = self.claim_funds_from_hop(&mut channel_state_lock, hop_data, payment_preimage);
4013                                 let claimed_htlc = if let ClaimFundsFromHop::DuplicateClaim = res { false } else { true };
4014                                 let htlc_claim_value_msat = match res {
4015                                         ClaimFundsFromHop::MonitorUpdateFail(_, _, amt_opt) => amt_opt,
4016                                         ClaimFundsFromHop::Success(amt) => Some(amt),
4017                                         _ => None,
4018                                 };
4019                                 if let ClaimFundsFromHop::PrevHopForceClosed = res {
4020                                         let preimage_update = ChannelMonitorUpdate {
4021                                                 update_id: CLOSED_CHANNEL_UPDATE_ID,
4022                                                 updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
4023                                                         payment_preimage: payment_preimage.clone(),
4024                                                 }],
4025                                         };
4026                                         // We update the ChannelMonitor on the backward link, after
4027                                         // receiving an offchain preimage event from the forward link (the
4028                                         // event being update_fulfill_htlc).
4029                                         if let Err(e) = self.chain_monitor.update_channel(prev_outpoint, preimage_update) {
4030                                                 log_error!(self.logger, "Critical error: failed to update channel monitor with preimage {:?}: {:?}",
4031                                                                                          payment_preimage, e);
4032                                         }
4033                                         // Note that we do *not* set `claimed_htlc` to false here. In fact, this
4034                                         // totally could be a duplicate claim, but we have no way of knowing
4035                                         // without interrogating the `ChannelMonitor` we've provided the above
4036                                         // update to. Instead, we simply document in `PaymentForwarded` that this
4037                                         // can happen.
4038                                 }
4039                                 mem::drop(channel_state_lock);
4040                                 if let ClaimFundsFromHop::MonitorUpdateFail(pk, err, _) = res {
4041                                         let result: Result<(), _> = Err(err);
4042                                         let _ = handle_error!(self, result, pk);
4043                                 }
4044
4045                                 if claimed_htlc {
4046                                         if let Some(forwarded_htlc_value) = forwarded_htlc_value_msat {
4047                                                 let fee_earned_msat = if let Some(claimed_htlc_value) = htlc_claim_value_msat {
4048                                                         Some(claimed_htlc_value - forwarded_htlc_value)
4049                                                 } else { None };
4050
4051                                                 let mut pending_events = self.pending_events.lock().unwrap();
4052                                                 let prev_channel_id = Some(prev_outpoint.to_channel_id());
4053                                                 let next_channel_id = Some(next_channel_id);
4054
4055                                                 pending_events.push(events::Event::PaymentForwarded {
4056                                                         fee_earned_msat,
4057                                                         claim_from_onchain_tx: from_onchain,
4058                                                         prev_channel_id,
4059                                                         next_channel_id,
4060                                                 });
4061                                         }
4062                                 }
4063                         },
4064                 }
4065         }
4066
4067         /// Gets the node_id held by this ChannelManager
4068         pub fn get_our_node_id(&self) -> PublicKey {
4069                 self.our_network_pubkey.clone()
4070         }
4071
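        /// Called once persistence of the `ChannelMonitorUpdate` identified by
        /// `highest_applied_update_id` has completed for the channel funded by `funding_txo`,
        /// restoring normal channel operation: held messages are replayed, finalized claims are
        /// processed, and HTLCs which must be failed back are failed.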
4072         fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
4073                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
4074
4075                 let chan_restoration_res;
4076                 let (mut pending_failures, finalized_claims) = {
4077                         let mut channel_lock = self.channel_state.lock().unwrap();
4078                         let channel_state = &mut *channel_lock;
4079                         let mut channel = match channel_state.by_id.entry(funding_txo.to_channel_id()) {
4080                                 hash_map::Entry::Occupied(chan) => chan,
4081                                 hash_map::Entry::Vacant(_) => return,
4082                         };
4083                         if !channel.get().is_awaiting_monitor_update() || channel.get().get_latest_monitor_update_id() != highest_applied_update_id {
4084                                 return;
4085                         }
4086
4087                         let updates = channel.get_mut().monitor_updating_restored(&self.logger, self.get_our_node_id(), self.genesis_hash, self.best_block.read().unwrap().height());
4088                         let channel_update = if updates.funding_locked.is_some() && channel.get().is_usable() {
4089                                 // We only send a channel_update in the case where we are just now sending a
4090                                 // funding_locked and the channel is in a usable state. We may re-send a
4091                                 // channel_update later through the announcement_signatures process for public
4092                                 // channels, but there's no reason not to just inform our counterparty of our fees
4093                                 // now.
4094                                 if let Ok(msg) = self.get_channel_update_for_unicast(channel.get()) {
4095                                         Some(events::MessageSendEvent::SendChannelUpdate {
4096                                                 node_id: channel.get().get_counterparty_node_id(),
4097                                                 msg,
4098                                         })
4099                                 } else { None }
4100                         } else { None };
4101                         chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.funding_locked, updates.announcement_sigs);
4102                         if let Some(upd) = channel_update {
4103                                 channel_state.pending_msg_events.push(upd);
4104                         }
4105                         (updates.failed_htlcs, updates.finalized_claimed_htlcs)
4106                 };
4107                 post_handle_chan_restoration!(self, chan_restoration_res);
4108                 self.finalize_claims(finalized_claims);
4109                 for failure in pending_failures.drain(..) {
4110                         self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
4111                 }
4112         }
4113
4114         /// Called to accept a request to open a channel after [`Event::OpenChannelRequest`] has been
4115         /// triggered.
4116         ///
4117         /// The `temporary_channel_id` parameter indicates which inbound channel should be accepted,
4118         /// and the `counterparty_node_id` parameter is the id of the peer which has requested to open
4119         /// the channel.
4120         ///
4121         /// For inbound channels, the `user_channel_id` parameter will be provided back in
4122         /// [`Event::ChannelClosed::user_channel_id`] to allow tracking of which events correspond
4123         /// with which `accept_inbound_channel` call.
4124         ///
4125         /// [`Event::OpenChannelRequest`]: events::Event::OpenChannelRequest
4126         /// [`Event::ChannelClosed::user_channel_id`]: events::Event::ChannelClosed::user_channel_id
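        /// A minimal sketch of accepting a manually-gated channel from an event handler; the
        /// `channel_manager` binding and the destructured event fields are assumptions for
        /// illustration:
        ///
        /// ```ignore
        /// // Upon Event::OpenChannelRequest { temporary_channel_id, counterparty_node_id, .. },
        /// // accept the channel, tagging it with an application-chosen identifier:
        /// let user_channel_id = 42;
        /// channel_manager
        ///     .accept_inbound_channel(&temporary_channel_id, &counterparty_node_id, user_channel_id)
        ///     .expect("channel should still be awaiting acceptance");
        /// ```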
4127         pub fn accept_inbound_channel(&self, temporary_channel_id: &[u8; 32], counterparty_node_id: &PublicKey, user_channel_id: u64) -> Result<(), APIError> {
4128                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
4129
4130                 let mut channel_state_lock = self.channel_state.lock().unwrap();
4131                 let channel_state = &mut *channel_state_lock;
4132                 match channel_state.by_id.entry(temporary_channel_id.clone()) {
4133                         hash_map::Entry::Occupied(mut channel) => {
4134                                 if !channel.get().inbound_is_awaiting_accept() {
4135                                         return Err(APIError::APIMisuseError { err: "The channel isn't currently awaiting to be accepted.".to_owned() });
4136                                 }
4137                                 if *counterparty_node_id != channel.get().get_counterparty_node_id() {
4138                                         return Err(APIError::APIMisuseError { err: "The passed counterparty_node_id doesn't match the channel's counterparty node_id".to_owned() });
4139                                 }
4140                                 channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
4141                                         node_id: channel.get().get_counterparty_node_id(),
4142                                         msg: channel.get_mut().accept_inbound_channel(user_channel_id),
4143                                 });
4144                         }
4145                         hash_map::Entry::Vacant(_) => {
4146                                 return Err(APIError::ChannelUnavailable { err: "Can't accept a channel that doesn't exist".to_owned() });
4147                         }
4148                 }
4149                 Ok(())
4150         }
4151
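        /// Handles an incoming `open_channel` message: validates the chain hash and our inbound
        /// channel policy, constructs the new inbound `Channel`, and either queues an
        /// `accept_channel` response immediately or surfaces an `Event::OpenChannelRequest` when
        /// manual acceptance is configured.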
4152         fn internal_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) -> Result<(), MsgHandleErrInternal> {
4153                 if msg.chain_hash != self.genesis_hash {
4154                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Unknown genesis block hash".to_owned(), msg.temporary_channel_id.clone()));
4155                 }
4156
4157                 if !self.default_configuration.accept_inbound_channels {
4158                         return Err(MsgHandleErrInternal::send_err_msg_no_close("No inbound channels accepted".to_owned(), msg.temporary_channel_id.clone()));
4159                 }
4160
4161                 let outbound_scid_alias = self.create_and_insert_outbound_scid_alias();
4162                 let mut channel = match Channel::new_from_req(&self.fee_estimator, &self.keys_manager,
4163                         counterparty_node_id.clone(), &their_features, msg, 0, &self.default_configuration,
4164                         self.best_block.read().unwrap().height(), &self.logger, outbound_scid_alias)
4165                 {
4166                         Err(e) => {
4167                                 self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
4168                                 return Err(MsgHandleErrInternal::from_chan_no_close(e, msg.temporary_channel_id));
4169                         },
4170                         Ok(res) => res
4171                 };
4172                 let mut channel_state_lock = self.channel_state.lock().unwrap();
4173                 let channel_state = &mut *channel_state_lock;
4174                 match channel_state.by_id.entry(channel.channel_id()) {
4175                         hash_map::Entry::Occupied(_) => {
4176                                 self.outbound_scid_aliases.lock().unwrap().remove(&outbound_scid_alias);
4177                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("temporary_channel_id collision!".to_owned(), msg.temporary_channel_id.clone()))
4178                         },
4179                         hash_map::Entry::Vacant(entry) => {
4180                                 if !self.default_configuration.manually_accept_inbound_channels {
4181                                         channel_state.pending_msg_events.push(events::MessageSendEvent::SendAcceptChannel {
4182                                                 node_id: counterparty_node_id.clone(),
4183                                                 msg: channel.accept_inbound_channel(0),
4184                                         });
4185                                 } else {
4186                                         let mut pending_events = self.pending_events.lock().unwrap();
4187                                         pending_events.push(
4188                                                 events::Event::OpenChannelRequest {
4189                                                         temporary_channel_id: msg.temporary_channel_id.clone(),
4190                                                         counterparty_node_id: counterparty_node_id.clone(),
4191                                                         funding_satoshis: msg.funding_satoshis,
4192                                                         push_msat: msg.push_msat,
4193                                                         channel_type: channel.get_channel_type().clone(),
4194                                                 }
4195                                         );
4196                                 }
4197
4198                                 entry.insert(channel);
4199                         }
4200                 }
4201                 Ok(())
4202         }
4203
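        /// Handles the counterparty's `accept_channel` for an outbound channel we initiated,
        /// surfacing an `Event::FundingGenerationReady` so the user can construct the funding
        /// transaction.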
4204         fn internal_accept_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) -> Result<(), MsgHandleErrInternal> {
4205                 let (value, output_script, user_id) = {
4206                         let mut channel_lock = self.channel_state.lock().unwrap();
4207                         let channel_state = &mut *channel_lock;
4208                         match channel_state.by_id.entry(msg.temporary_channel_id) {
4209                                 hash_map::Entry::Occupied(mut chan) => {
4210                                         if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4211                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
4212                                         }
4213                                         try_chan_entry!(self, chan.get_mut().accept_channel(&msg, &self.default_configuration.peer_channel_config_limits, &their_features), channel_state, chan);
4214                                         (chan.get().get_value_satoshis(), chan.get().get_funding_redeemscript().to_v0_p2wsh(), chan.get().get_user_id())
4215                                 },
4216                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
4217                         }
4218                 };
4219                 let mut pending_events = self.pending_events.lock().unwrap();
4220                 pending_events.push(events::Event::FundingGenerationReady {
4221                         temporary_channel_id: msg.temporary_channel_id,
4222                         counterparty_node_id: *counterparty_node_id,
4223                         channel_value_satoshis: value,
4224                         output_script,
4225                         user_channel_id: user_id,
4226                 });
4227                 Ok(())
4228         }
4229
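        /// Handles `funding_created` from the channel funder: registers the initial
        /// `ChannelMonitor` with the chain watcher, responds with `funding_signed`, and moves the
        /// channel under its final channel_id.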
4230         fn internal_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) -> Result<(), MsgHandleErrInternal> {
4231                 let ((funding_msg, monitor), mut chan) = {
4232                         let best_block = *self.best_block.read().unwrap();
4233                         let mut channel_lock = self.channel_state.lock().unwrap();
4234                         let channel_state = &mut *channel_lock;
4235                         match channel_state.by_id.entry(msg.temporary_channel_id.clone()) {
4236                                 hash_map::Entry::Occupied(mut chan) => {
4237                                         if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4238                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.temporary_channel_id));
4239                                         }
4240                                         (try_chan_entry!(self, chan.get_mut().funding_created(msg, best_block, &self.logger), channel_state, chan), chan.remove())
4241                                 },
4242                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.temporary_channel_id))
4243                         }
4244                 };
4245                 // Because we have exclusive ownership of the channel here we can release the channel_state
4246                 // lock before watch_channel
4247                 if let Err(e) = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor) {
4248                         match e {
4249                                 ChannelMonitorUpdateErr::PermanentFailure => {
4250                                         // Note that we reply with the new channel_id in error messages if we gave up on the
4251                                         // channel, not the temporary_channel_id. This is compatible with ourselves, but the
4252                                         // spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
4253                                         // any messages referencing a previously-closed channel anyway.
4254                                         // We do not do a force-close here as that would generate a monitor update for
4255                                         // a monitor that we didn't manage to store (and that we don't care about - we
4256                                         // don't respond with the funding_signed so the channel can never go on chain).
4257                                         let (_monitor_update, failed_htlcs) = chan.force_shutdown(true);
4258                                         assert!(failed_htlcs.is_empty());
4259                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("ChannelMonitor storage failure".to_owned(), funding_msg.channel_id));
4260                                 },
4261                                 ChannelMonitorUpdateErr::TemporaryFailure => {
4262                                         // There's no problem signing a counterparty's funding transaction if our monitor
4263                                         // hasn't persisted to disk yet - we can't lose money on a transaction that we haven't
4264                                         // accepted payment from yet. We do, however, need to wait to send our funding_locked
4265                                         // until we have persisted our monitor.
4266                                         chan.monitor_update_failed(false, false, Vec::new(), Vec::new(), Vec::new());
4267                                 },
4268                         }
4269                 }
4270                 let mut channel_state_lock = self.channel_state.lock().unwrap();
4271                 let channel_state = &mut *channel_state_lock;
4272                 match channel_state.by_id.entry(funding_msg.channel_id) {
4273                         hash_map::Entry::Occupied(_) => {
4274                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Already had channel with the new channel_id".to_owned(), funding_msg.channel_id))
4275                         },
4276                         hash_map::Entry::Vacant(e) => {
4277                                 channel_state.pending_msg_events.push(events::MessageSendEvent::SendFundingSigned {
4278                                         node_id: counterparty_node_id.clone(),
4279                                         msg: funding_msg,
4280                                 });
4281                                 e.insert(chan);
4282                         }
4283                 }
4284                 Ok(())
4285         }
4286
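        /// Handles `funding_signed` from the counterparty: registers the initial `ChannelMonitor`
        /// and broadcasts our funding transaction.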
4287         fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
4288                 let funding_tx = {
4289                         let best_block = *self.best_block.read().unwrap();
4290                         let mut channel_lock = self.channel_state.lock().unwrap();
4291                         let channel_state = &mut *channel_lock;
4292                         match channel_state.by_id.entry(msg.channel_id) {
4293                                 hash_map::Entry::Occupied(mut chan) => {
4294                                         if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4295                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4296                                         }
4297                                         let (monitor, funding_tx) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
4298                                                 Ok(update) => update,
4299                                                 Err(e) => try_chan_entry!(self, Err(e), channel_state, chan),
4300                                         };
4301                                         if let Err(e) = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
4302                                                 let mut res = handle_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, false, false);
4303                                                 if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
4304                                                         // We weren't able to watch the channel to begin with, so no updates should be made on
4305                                                         // it. Previously, full_stack_target found an (unreachable) panic when the
4306                                                         // monitor update contained within `shutdown_finish` was applied.
4307                                                         if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
4308                                                                 shutdown_finish.0.take();
4309                                                         }
4310                                                 }
4311                                                 return res
4312                                         }
4313                                         funding_tx
4314                                 },
4315                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4316                         }
4317                 };
4318                 log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid());
4319                 self.tx_broadcaster.broadcast_transaction(&funding_tx);
4320                 Ok(())
4321         }
4322
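        /// Handles `funding_locked` from the counterparty, queueing `announcement_signatures` for
        /// public channels or an initial unicast `channel_update` for private ones once the
        /// channel becomes usable.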
4323         fn internal_funding_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingLocked) -> Result<(), MsgHandleErrInternal> {
4324                 let mut channel_state_lock = self.channel_state.lock().unwrap();
4325                 let channel_state = &mut *channel_state_lock;
4326                 match channel_state.by_id.entry(msg.channel_id) {
4327                         hash_map::Entry::Occupied(mut chan) => {
4328                                 if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4329                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4330                                 }
4331                                 let announcement_sigs_opt = try_chan_entry!(self, chan.get_mut().funding_locked(&msg, self.get_our_node_id(),
4332                                         self.genesis_hash.clone(), &self.best_block.read().unwrap(), &self.logger), channel_state, chan);
4333                                 if let Some(announcement_sigs) = announcement_sigs_opt {
4334                                         log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(chan.get().channel_id()));
4335                                         channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
4336                                                 node_id: counterparty_node_id.clone(),
4337                                                 msg: announcement_sigs,
4338                                         });
4339                                 } else if chan.get().is_usable() {
4340                                         // If we're sending an announcement_signatures, we'll send the (public)
4341                                         // channel_update after sending a channel_announcement when we receive our
4342                                         // counterparty's announcement_signatures. Thus, we only bother to send a
4343                                         // channel_update here if the channel is not public, i.e. we're not sending an
4344                                         // announcement_signatures.
4345                                         log_trace!(self.logger, "Sending private initial channel_update for our counterparty on channel {}", log_bytes!(chan.get().channel_id()));
4346                                         if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
4347                                                 channel_state.pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
4348                                                         node_id: counterparty_node_id.clone(),
4349                                                         msg,
4350                                                 });
4351                                         }
4352                                 }
4353                                 Ok(())
4354                         },
4355                         hash_map::Entry::Vacant(_) => Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4356                 }
4357         }
4358
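        /// Handles a `shutdown` message from the counterparty: updates the monitor with the
        /// shutdown script if necessary, echoes our own `shutdown` when appropriate, and fails
        /// back any HTLCs dropped as a result.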
4359         fn internal_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) -> Result<(), MsgHandleErrInternal> {
4360                 let mut dropped_htlcs: Vec<(HTLCSource, PaymentHash)>;
4361                 let result: Result<(), _> = loop {
4362                         let mut channel_state_lock = self.channel_state.lock().unwrap();
4363                         let channel_state = &mut *channel_state_lock;
4364
4365                         match channel_state.by_id.entry(msg.channel_id.clone()) {
4366                                 hash_map::Entry::Occupied(mut chan_entry) => {
4367                                         if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
4368                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4369                                         }
4370
4371                                         if !chan_entry.get().received_shutdown() {
4372                                                 log_info!(self.logger, "Received a shutdown message from our counterparty for channel {}{}.",
4373                                                         log_bytes!(msg.channel_id),
4374                                                         if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
4375                                         }
4376
4377                                         let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), channel_state, chan_entry);
4378                                         dropped_htlcs = htlcs;
4379
4380                                         // Update the monitor with the shutdown script if necessary.
4381                                         if let Some(monitor_update) = monitor_update {
4382                                                 if let Err(e) = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), monitor_update) {
4383                                                         let (result, is_permanent) =
4384                                                                 handle_monitor_err!(self, e, channel_state.short_to_id, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
4385                                                         if is_permanent {
4386                                                                 remove_channel!(self, channel_state, chan_entry);
4387                                                                 break result;
4388                                                         }
4389                                                 }
4390                                         }
4391
4392                                         if let Some(msg) = shutdown {
4393                                                 channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
4394                                                         node_id: *counterparty_node_id,
4395                                                         msg,
4396                                                 });
4397                                         }
4398
4399                                         break Ok(());
4400                                 },
4401                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4402                         }
4403                 };
4404                 for htlc_source in dropped_htlcs.drain(..) {
4405                         self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
4406                 }
4407
4408                 let _ = handle_error!(self, result, *counterparty_node_id);
4409                 Ok(())
4410         }
4411
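        /// Handles `closing_signed`, responding with our own `closing_signed` while fee
        /// negotiation continues, or broadcasting the fully-signed closing transaction and
        /// removing the channel once agreement is reached.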
4412         fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
4413                 let (tx, chan_option) = {
4414                         let mut channel_state_lock = self.channel_state.lock().unwrap();
4415                         let channel_state = &mut *channel_state_lock;
4416                         match channel_state.by_id.entry(msg.channel_id.clone()) {
4417                                 hash_map::Entry::Occupied(mut chan_entry) => {
4418                                         if chan_entry.get().get_counterparty_node_id() != *counterparty_node_id {
4419                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4420                                         }
4421                                         let (closing_signed, tx) = try_chan_entry!(self, chan_entry.get_mut().closing_signed(&self.fee_estimator, &msg), channel_state, chan_entry);
4422                                         if let Some(msg) = closing_signed {
4423                                                 channel_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
4424                                                         node_id: counterparty_node_id.clone(),
4425                                                         msg,
4426                                                 });
4427                                         }
4428                                         if tx.is_some() {
4429                                                 // We're done with this channel, we've got a signed closing transaction and
4430                                                 // will send the closing_signed back to the remote peer upon return. This
4431                                                 // also implies there are no pending HTLCs left on the channel, so we can
4432                                                 // fully delete it from tracking (the channel monitor is still around to
4433                                                 // watch for old state broadcasts)!
4434                                                 (tx, Some(remove_channel!(self, channel_state, chan_entry)))
4435                                         } else { (tx, None) }
4436                                 },
4437                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4438                         }
4439                 };
4440                 if let Some(broadcast_tx) = tx {
4441                         log_info!(self.logger, "Broadcasting {}", log_tx!(broadcast_tx));
4442                         self.tx_broadcaster.broadcast_transaction(&broadcast_tx);
4443                 }
4444                 if let Some(chan) = chan_option {
4445                         if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
4446                                 let mut channel_state = self.channel_state.lock().unwrap();
4447                                 channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
4448                                         msg: update
4449                                 });
4450                         }
4451                         self.issue_channel_close_events(&chan, ClosureReason::CooperativeClosure);
4452                 }
4453                 Ok(())
4454         }
4455
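        /// Handles `update_add_htlc`: decodes the onion and records the resulting forward,
        /// receive, or fail-back decision against the channel, converting certain failures into
        /// `update_fail_htlc` responses rather than closing the channel.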
4456         fn internal_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) -> Result<(), MsgHandleErrInternal> {
4457                 //TODO: BOLT 4 points out a specific attack where a peer may re-send an onion packet and
4458                 //determine the state of the payment based on our response/if we forward anything/the time
4459                 //we take to respond. We should take care to avoid allowing such an attack.
4460                 //
4461                 //TODO: There exists a further attack where a node may garble the onion data, forward it to
4462                 //us repeatedly garbled in different ways, and compare our error messages, which are
4463                 //encrypted with the same key. It's not immediately obvious how to usefully exploit that,
4464                 //but we should prevent it anyway.
4465
4466                 let (pending_forward_info, mut channel_state_lock) = self.decode_update_add_htlc_onion(msg);
4467                 let channel_state = &mut *channel_state_lock;
4468
4469                 match channel_state.by_id.entry(msg.channel_id) {
4470                         hash_map::Entry::Occupied(mut chan) => {
4471                                 if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4472                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4473                                 }
4474
4475                                 let create_pending_htlc_status = |chan: &Channel<Signer>, pending_forward_info: PendingHTLCStatus, error_code: u16| {
4476                                         // If the update_add is completely bogus, the call will Err and we will close,
4477                                         // but if we've sent a shutdown and they haven't acknowledged it yet, we just
4478                                         // want to reject the new HTLC and fail it backwards instead of forwarding.
4479                                         match pending_forward_info {
4480                                                 PendingHTLCStatus::Forward(PendingHTLCInfo { ref incoming_shared_secret, .. }) => {
4481                                                         let reason = if (error_code & 0x1000) != 0 {
4482                                                                 let (real_code, error_data) = self.get_htlc_inbound_temp_fail_err_and_data(error_code, chan);
4483                                                                 onion_utils::build_first_hop_failure_packet(incoming_shared_secret, real_code, &error_data)
4484                                                         } else {
4485                                                                 onion_utils::build_first_hop_failure_packet(incoming_shared_secret, error_code, &[])
4486                                                         };
4487                                                         let msg = msgs::UpdateFailHTLC {
4488                                                                 channel_id: msg.channel_id,
4489                                                                 htlc_id: msg.htlc_id,
4490                                                                 reason
4491                                                         };
4492                                                         PendingHTLCStatus::Fail(HTLCFailureMsg::Relay(msg))
4493                                                 },
4494                                                 _ => pending_forward_info
4495                                         }
4496                                 };
4497                                 try_chan_entry!(self, chan.get_mut().update_add_htlc(&msg, pending_forward_info, create_pending_htlc_status, &self.logger), channel_state, chan);
4498                         },
4499                         hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4500                 }
4501                 Ok(())
4502         }
4503
4504         fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
4505                 let mut channel_lock = self.channel_state.lock().unwrap();
4506                 let (htlc_source, forwarded_htlc_value) = {
4507                         let channel_state = &mut *channel_lock;
4508                         match channel_state.by_id.entry(msg.channel_id) {
4509                                 hash_map::Entry::Occupied(mut chan) => {
4510                                         if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4511                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4512                                         }
4513                                         try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), channel_state, chan)
4514                                 },
4515                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4516                         }
4517                 };
4518                 self.claim_funds_internal(channel_lock, htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
4519                 Ok(())
4520         }
4521
4522         fn internal_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) -> Result<(), MsgHandleErrInternal> {
4523                 let mut channel_lock = self.channel_state.lock().unwrap();
4524                 let channel_state = &mut *channel_lock;
4525                 match channel_state.by_id.entry(msg.channel_id) {
4526                         hash_map::Entry::Occupied(mut chan) => {
4527                                 if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4528                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4529                                 }
4530                                 try_chan_entry!(self, chan.get_mut().update_fail_htlc(&msg, HTLCFailReason::LightningError { err: msg.reason.clone() }), channel_state, chan);
4531                         },
4532                         hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4533                 }
4534                 Ok(())
4535         }
4536
4537         fn internal_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) -> Result<(), MsgHandleErrInternal> {
4538                 let mut channel_lock = self.channel_state.lock().unwrap();
4539                 let channel_state = &mut *channel_lock;
4540                 match channel_state.by_id.entry(msg.channel_id) {
4541                         hash_map::Entry::Occupied(mut chan) => {
4542                                 if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4543                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4544                                 }
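                                     // Per BOLT 4, update_fail_malformed_htlc must set the BADONION bit (0x8000) in its
                                     // failure_code; a peer that omits it is misbehaving, so we close the channel below.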
4545                                 if (msg.failure_code & 0x8000) == 0 {
4546                                         let chan_err: ChannelError = ChannelError::Close("Got update_fail_malformed_htlc with BADONION not set".to_owned());
4547                                         try_chan_entry!(self, Err(chan_err), channel_state, chan);
4548                                 }
4549                                 try_chan_entry!(self, chan.get_mut().update_fail_malformed_htlc(&msg, HTLCFailReason::Reason { failure_code: msg.failure_code, data: Vec::new() }), channel_state, chan);
4550                                 Ok(())
4551                         },
4552                         hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4553                 }
4554         }
4555
4556         fn internal_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) -> Result<(), MsgHandleErrInternal> {
4557                 let mut channel_state_lock = self.channel_state.lock().unwrap();
4558                 let channel_state = &mut *channel_state_lock;
4559                 match channel_state.by_id.entry(msg.channel_id) {
4560                         hash_map::Entry::Occupied(mut chan) => {
4561                                 if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4562                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4563                                 }
4564                                 let (revoke_and_ack, commitment_signed, monitor_update) =
4565                                         match chan.get_mut().commitment_signed(&msg, &self.logger) {
4566                                                 Err((None, e)) => try_chan_entry!(self, Err(e), channel_state, chan),
4567                                                 Err((Some(update), e)) => {
4568                                                         assert!(chan.get().is_awaiting_monitor_update());
4569                                                         let _ = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), update);
4570                                                         try_chan_entry!(self, Err(e), channel_state, chan);
4571                                                         unreachable!();
4572                                                 },
4573                                                 Ok(res) => res
4574                                         };
4575                                 if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) {
4576                                         return_monitor_err!(self, e, channel_state, chan, RAACommitmentOrder::RevokeAndACKFirst, true, commitment_signed.is_some());
4577                                 }
4578                                 channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
4579                                         node_id: counterparty_node_id.clone(),
4580                                         msg: revoke_and_ack,
4581                                 });
4582                                 if let Some(msg) = commitment_signed {
4583                                         channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
4584                                                 node_id: counterparty_node_id.clone(),
4585                                                 updates: msgs::CommitmentUpdate {
4586                                                         update_add_htlcs: Vec::new(),
4587                                                         update_fulfill_htlcs: Vec::new(),
4588                                                         update_fail_htlcs: Vec::new(),
4589                                                         update_fail_malformed_htlcs: Vec::new(),
4590                                                         update_fee: None,
4591                                                         commitment_signed: msg,
4592                                                 },
4593                                         });
4594                                 }
4595                                 Ok(())
4596                         },
4597                         hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4598                 }
4599         }
4600
4601         #[inline]
4602         fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)]) {
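                     // Forwards are queued in `forward_htlcs`, keyed by the outgoing short channel id (or 0 when
                     // we are the final recipient). A PendingHTLCsForwardable event is only generated when the map
                     // transitions from empty to non-empty, so repeated calls do not flood the user with events.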
4603                 for &mut (prev_short_channel_id, prev_funding_outpoint, ref mut pending_forwards) in per_source_pending_forwards {
4604                         let mut forward_event = None;
4605                         if !pending_forwards.is_empty() {
4606                                 let mut channel_state = self.channel_state.lock().unwrap();
4607                                 if channel_state.forward_htlcs.is_empty() {
4608                                         forward_event = Some(Duration::from_millis(MIN_HTLC_RELAY_HOLDING_CELL_MILLIS))
4609                                 }
4610                                 for (forward_info, prev_htlc_id) in pending_forwards.drain(..) {
4611                                         match channel_state.forward_htlcs.entry(match forward_info.routing {
4612                                                         PendingHTLCRouting::Forward { short_channel_id, .. } => short_channel_id,
4613                                                         PendingHTLCRouting::Receive { .. } => 0,
4614                                                         PendingHTLCRouting::ReceiveKeysend { .. } => 0,
4615                                         }) {
4616                                                 hash_map::Entry::Occupied(mut entry) => {
4617                                                         entry.get_mut().push(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
4618                                                                                                         prev_htlc_id, forward_info });
4619                                                 },
4620                                                 hash_map::Entry::Vacant(entry) => {
4621                                                         entry.insert(vec!(HTLCForwardInfo::AddHTLC { prev_short_channel_id, prev_funding_outpoint,
4622                                                                                                      prev_htlc_id, forward_info }));
4623                                                 }
4624                                         }
4625                                 }
4626                         }
4627                         match forward_event {
4628                                 Some(time) => {
4629                                         let mut pending_events = self.pending_events.lock().unwrap();
4630                                         pending_events.push(events::Event::PendingHTLCsForwardable {
4631                                                 time_forwardable: time
4632                                         });
4633                                 }
4634                                 None => {},
4635                         }
4636                 }
4637         }
4638
4639         fn internal_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) -> Result<(), MsgHandleErrInternal> {
4640                 let mut htlcs_to_fail = Vec::new();
4641                 let res = loop {
4642                         let mut channel_state_lock = self.channel_state.lock().unwrap();
4643                         let channel_state = &mut *channel_state_lock;
4644                         match channel_state.by_id.entry(msg.channel_id) {
4645                                 hash_map::Entry::Occupied(mut chan) => {
4646                                         if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4647                                                 break Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4648                                         }
4649                                         let was_frozen_for_monitor = chan.get().is_awaiting_monitor_update();
4650                                         let raa_updates = break_chan_entry!(self,
4651                                                 chan.get_mut().revoke_and_ack(&msg, &self.logger), channel_state, chan);
4652                                         htlcs_to_fail = raa_updates.holding_cell_failed_htlcs;
4653                                         if let Err(e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), raa_updates.monitor_update) {
4654                                                 if was_frozen_for_monitor {
4655                                                         assert!(raa_updates.commitment_update.is_none());
4656                                                         assert!(raa_updates.accepted_htlcs.is_empty());
4657                                                         assert!(raa_updates.failed_htlcs.is_empty());
4658                                                         assert!(raa_updates.finalized_claimed_htlcs.is_empty());
4659                                                         break Err(MsgHandleErrInternal::ignore_no_close("Previous monitor update failure prevented responses to RAA".to_owned()));
4660                                                 } else {
4661                                                         if let Err(e) = handle_monitor_err!(self, e, channel_state, chan,
4662                                                                         RAACommitmentOrder::CommitmentFirst, false,
4663                                                                         raa_updates.commitment_update.is_some(),
4664                                                                         raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
4665                                                                         raa_updates.finalized_claimed_htlcs) {
4666                                                                 break Err(e);
4667                                                         } else { unreachable!(); }
4668                                                 }
4669                                         }
4670                                         if let Some(updates) = raa_updates.commitment_update {
4671                                                 channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
4672                                                         node_id: counterparty_node_id.clone(),
4673                                                         updates,
4674                                                 });
4675                                         }
4676                                         break Ok((raa_updates.accepted_htlcs, raa_updates.failed_htlcs,
4677                                                         raa_updates.finalized_claimed_htlcs,
4678                                                         chan.get().get_short_channel_id()
4679                                                                 .expect("RAA should only work on a short-id-available channel"),
4680                                                         chan.get().get_funding_txo().unwrap()))
4681                                 },
4682                                 hash_map::Entry::Vacant(_) => break Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4683                         }
4684                 };
4685                 self.fail_holding_cell_htlcs(htlcs_to_fail, msg.channel_id);
4686                 match res {
4687                         Ok((pending_forwards, mut pending_failures, finalized_claim_htlcs,
4688                                 short_channel_id, channel_outpoint)) =>
4689                         {
4690                                 for failure in pending_failures.drain(..) {
4691                                         self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), failure.0, &failure.1, failure.2);
4692                                 }
4693                                 self.forward_htlcs(&mut [(short_channel_id, channel_outpoint, pending_forwards)]);
4694                                 self.finalize_claims(finalized_claim_htlcs);
4695                                 Ok(())
4696                         },
4697                         Err(e) => Err(e)
4698                 }
4699         }
4700
4701         fn internal_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) -> Result<(), MsgHandleErrInternal> {
4702                 let mut channel_lock = self.channel_state.lock().unwrap();
4703                 let channel_state = &mut *channel_lock;
4704                 match channel_state.by_id.entry(msg.channel_id) {
4705                         hash_map::Entry::Occupied(mut chan) => {
4706                                 if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4707                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4708                                 }
4709                                 try_chan_entry!(self, chan.get_mut().update_fee(&self.fee_estimator, &msg), channel_state, chan);
4710                         },
4711                         hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4712                 }
4713                 Ok(())
4714         }
4715
4716         fn internal_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) -> Result<(), MsgHandleErrInternal> {
4717                 let mut channel_state_lock = self.channel_state.lock().unwrap();
4718                 let channel_state = &mut *channel_state_lock;
4719
4720                 match channel_state.by_id.entry(msg.channel_id) {
4721                         hash_map::Entry::Occupied(mut chan) => {
4722                                 if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4723                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4724                                 }
4725                                 if !chan.get().is_usable() {
4726                                         return Err(MsgHandleErrInternal::from_no_close(LightningError{err: "Got an announcement_signatures before we were ready for it".to_owned(), action: msgs::ErrorAction::IgnoreError}));
4727                                 }
4728
4729                                 channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
4730                                         msg: try_chan_entry!(self, chan.get_mut().announcement_signatures(
4731                                                 self.get_our_node_id(), self.genesis_hash.clone(), self.best_block.read().unwrap().height(), msg), channel_state, chan),
4732                                         // Note that announcement_signatures fails if the channel cannot be announced,
4733                                         // so get_channel_update_for_broadcast will never fail by the time we get here.
4734                                         update_msg: self.get_channel_update_for_broadcast(chan.get()).unwrap(),
4735                                 });
4736                         },
4737                         hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4738                 }
4739                 Ok(())
4740         }
4741
4742         /// Returns DoPersist if anything changed, otherwise either SkipPersist or an Err.
4743         fn internal_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) -> Result<NotifyOption, MsgHandleErrInternal> {
4744                 let mut channel_state_lock = self.channel_state.lock().unwrap();
4745                 let channel_state = &mut *channel_state_lock;
4746                 let chan_id = match channel_state.short_to_id.get(&msg.contents.short_channel_id) {
4747                         Some(chan_id) => chan_id.clone(),
4748                         None => {
4749                                 // It's not a local channel
4750                                 return Ok(NotifyOption::SkipPersist)
4751                         }
4752                 };
4753                 match channel_state.by_id.entry(chan_id) {
4754                         hash_map::Entry::Occupied(mut chan) => {
4755                                 if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4756                                         if chan.get().should_announce() {
4757                                                 // If the announcement is about a channel of ours which is public, some
4758                                                 // other peer may simply be forwarding all its gossip to us. Don't provide
4759                                                 // a scary-looking error message and return Ok instead.
4760                                                 return Ok(NotifyOption::SkipPersist);
4761                                         }
4762                                         return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a channel_update for a channel from the wrong node - it shouldn't know about our private channels!".to_owned(), chan_id));
4763                                 }
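                                     // Per BOLT 7, bit 0 of the channel_update flags encodes the direction: 0 means the
                                     // update was produced by node_1 (the lexicographically-lesser pubkey). Updates covering
                                     // our own direction tell us nothing new, so only the counterparty's updates are applied.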
4764                                 let were_node_one = self.get_our_node_id().serialize()[..] < chan.get().get_counterparty_node_id().serialize()[..];
4765                                 let msg_from_node_one = msg.contents.flags & 1 == 0;
4766                                 if were_node_one == msg_from_node_one {
4767                                         return Ok(NotifyOption::SkipPersist);
4768                                 } else {
4769                                         try_chan_entry!(self, chan.get_mut().channel_update(&msg), channel_state, chan);
4770                                 }
4771                         },
4772                         hash_map::Entry::Vacant(_) => unreachable!()
4773                 }
4774                 Ok(NotifyOption::DoPersist)
4775         }
4776
4777         fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
4778                 let chan_restoration_res;
4779                 let (htlcs_failed_forward, need_lnd_workaround) = {
4780                         let mut channel_state_lock = self.channel_state.lock().unwrap();
4781                         let channel_state = &mut *channel_state_lock;
4782
4783                         match channel_state.by_id.entry(msg.channel_id) {
4784                                 hash_map::Entry::Occupied(mut chan) => {
4785                                         if chan.get().get_counterparty_node_id() != *counterparty_node_id {
4786                                                 return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
4787                                         }
4788                                         // Currently, we expect all holding cell update_adds to be dropped on peer
4789                                         // disconnect, so Channel's reestablish will never hand us any holding cell
4790                                         // freed HTLCs to fail backwards. If in the future we no longer drop pending
4791                                         // add-HTLCs on disconnect, we may be handed HTLCs to fail backwards here.
4792                                         let responses = try_chan_entry!(self, chan.get_mut().channel_reestablish(
4793                                                 msg, &self.logger, self.our_network_pubkey.clone(), self.genesis_hash,
4794                                                 &*self.best_block.read().unwrap()), channel_state, chan);
4795                                         let mut channel_update = None;
4796                                         if let Some(msg) = responses.shutdown_msg {
4797                                                 channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
4798                                                         node_id: counterparty_node_id.clone(),
4799                                                         msg,
4800                                                 });
4801                                         } else if chan.get().is_usable() {
4802                                                 // If the channel is in a usable state (ie the channel is not being shut
4803                                                 // down), send a unicast channel_update to our counterparty to make sure
4804                                                 // they have the latest channel parameters.
4805                                                 if let Ok(msg) = self.get_channel_update_for_unicast(chan.get()) {
4806                                                         channel_update = Some(events::MessageSendEvent::SendChannelUpdate {
4807                                                                 node_id: chan.get().get_counterparty_node_id(),
4808                                                                 msg,
4809                                                         });
4810                                                 }
4811                                         }
4812                                         let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
4813                                         chan_restoration_res = handle_chan_restoration_locked!(
4814                                                 self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
4815                                                 responses.mon_update, Vec::new(), None, responses.funding_locked, responses.announcement_sigs);
4816                                         if let Some(upd) = channel_update {
4817                                                 channel_state.pending_msg_events.push(upd);
4818                                         }
4819                                         (responses.holding_cell_failed_htlcs, need_lnd_workaround)
4820                                 },
4821                                 hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
4822                         }
4823                 };
4824                 post_handle_chan_restoration!(self, chan_restoration_res);
4825                 self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id);
4826
4827                 if let Some(funding_locked_msg) = need_lnd_workaround {
4828                         self.internal_funding_locked(counterparty_node_id, &funding_locked_msg)?;
4829                 }
4830                 Ok(())
4831         }
4832
4833         /// Process pending events from the `chain::Watch`, returning whether any events were processed.
4834         fn process_pending_monitor_events(&self) -> bool {
4835                 let mut failed_channels = Vec::new();
4836                 let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events();
4837                 let has_pending_monitor_events = !pending_monitor_events.is_empty();
4838                 for (funding_outpoint, mut monitor_events) in pending_monitor_events.drain(..) {
4839                         for monitor_event in monitor_events.drain(..) {
4840                                 match monitor_event {
4841                                         MonitorEvent::HTLCEvent(htlc_update) => {
4842                                                 if let Some(preimage) = htlc_update.payment_preimage {
4843                                                         log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
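                                                             // The monitor reports the HTLC value in satoshis, while claim_funds_internal
                                                             // expects millisatoshis, hence the `* 1000` conversion below.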
4844                                                         self.claim_funds_internal(self.channel_state.lock().unwrap(), htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
4845                                                 } else {
4846                                                         log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
4847                                                         self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), htlc_update.source, &htlc_update.payment_hash, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
4848                                                 }
4849                                         },
4850                                         MonitorEvent::CommitmentTxConfirmed(funding_outpoint) |
4851                                         MonitorEvent::UpdateFailed(funding_outpoint) => {
4852                                                 let mut channel_lock = self.channel_state.lock().unwrap();
4853                                                 let channel_state = &mut *channel_lock;
4854                                                 let by_id = &mut channel_state.by_id;
4855                                                 let pending_msg_events = &mut channel_state.pending_msg_events;
4856                                                 if let hash_map::Entry::Occupied(chan_entry) = by_id.entry(funding_outpoint.to_channel_id()) {
4857                                                         let mut chan = remove_channel!(self, channel_state, chan_entry);
4858                                                         failed_channels.push(chan.force_shutdown(false));
4859                                                         if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
4860                                                                 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
4861                                                                         msg: update
4862                                                                 });
4863                                                         }
4864                                                         let reason = if let MonitorEvent::UpdateFailed(_) = monitor_event {
4865                                                                 ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() }
4866                                                         } else {
4867                                                                 ClosureReason::CommitmentTxConfirmed
4868                                                         };
4869                                                         self.issue_channel_close_events(&chan, reason);
4870                                                         pending_msg_events.push(events::MessageSendEvent::HandleError {
4871                                                                 node_id: chan.get_counterparty_node_id(),
4872                                                                 action: msgs::ErrorAction::SendErrorMessage {
4873                                                                         msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
4874                                                                 },
4875                                                         });
4876                                                 }
4877                                         },
4878                                         MonitorEvent::UpdateCompleted { funding_txo, monitor_update_id } => {
4879                                                 self.channel_monitor_updated(&funding_txo, monitor_update_id);
4880                                         },
4881                                 }
4882                         }
4883                 }
4884
4885                 for failure in failed_channels.drain(..) {
4886                         self.finish_force_close_channel(failure);
4887                 }
4888
4889                 has_pending_monitor_events
4890         }
4891
4892         /// In chanmon_consistency_target, we'd like to be able to restore monitor updating without
4893         /// handling all other pending events (in particular, PendingHTLCsForwardable). Thus, we expose
4894         /// processing of monitor update events as a separate method here.
4895         #[cfg(fuzzing)]
4896         pub fn process_monitor_events(&self) {
4897                 self.process_pending_monitor_events();
4898         }
4899
4900         /// Check the holding cell in each channel and free any pending HTLCs in them if possible.
4901         /// Returns whether there were any updates, such as pending HTLCs being freed or a monitor
4902         /// update being applied.
4903         ///
4904         /// This should only apply to HTLCs which were added to the holding cell because we were
4905         /// waiting on a monitor update to finish. In that case, we don't want to free the holding cell
4906         /// directly in `channel_monitor_updated`, as doing so may introduce deadlocks by calling back
4907         /// into user code to inform them of a channel monitor update.
4908         fn check_free_holding_cells(&self) -> bool {
4909                 let mut has_monitor_update = false;
4910                 let mut failed_htlcs = Vec::new();
4911                 let mut handle_errors = Vec::new();
4912                 {
4913                         let mut channel_state_lock = self.channel_state.lock().unwrap();
4914                         let channel_state = &mut *channel_state_lock;
4915                         let by_id = &mut channel_state.by_id;
4916                         let short_to_id = &mut channel_state.short_to_id;
4917                         let pending_msg_events = &mut channel_state.pending_msg_events;
4918
4919                         by_id.retain(|channel_id, chan| {
4920                                 match chan.maybe_free_holding_cell_htlcs(&self.logger) {
4921                                         Ok((commitment_opt, holding_cell_failed_htlcs)) => {
4922                                                 if !holding_cell_failed_htlcs.is_empty() {
4923                                                         failed_htlcs.push((holding_cell_failed_htlcs, *channel_id));
4924                                                 }
4925                                                 if let Some((commitment_update, monitor_update)) = commitment_opt {
4926                                                         if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) {
4927                                                                 has_monitor_update = true;
4928                                                                 let (res, close_channel) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, channel_id, COMMITMENT_UPDATE_ONLY);
4929                                                                 handle_errors.push((chan.get_counterparty_node_id(), res));
4930                                                                 if close_channel { return false; }
4931                                                         } else {
4932                                                                 pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
4933                                                                         node_id: chan.get_counterparty_node_id(),
4934                                                                         updates: commitment_update,
4935                                                                 });
4936                                                         }
4937                                                 }
4938                                                 true
4939                                         },
4940                                         Err(e) => {
4941                                                 let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
4942                                                 handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
4943                                                 // ChannelClosed event is generated by handle_error for us
4944                                                 !close_channel
4945                                         }
4946                                 }
4947                         });
4948                 }
4949
4950                 let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
4951                 for (failures, channel_id) in failed_htlcs.drain(..) {
4952                         self.fail_holding_cell_htlcs(failures, channel_id);
4953                 }
4954
4955                 for (counterparty_node_id, err) in handle_errors.drain(..) {
4956                         let _ = handle_error!(self, err, counterparty_node_id);
4957                 }
4958
4959                 has_update
4960         }
4961
4962         /// Check whether any channels have finished removing all pending updates after a shutdown
4963         /// exchange and can now send a closing_signed.
4964         /// Returns whether any closing_signed messages were generated.
4965         fn maybe_generate_initial_closing_signed(&self) -> bool {
4966                 let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
4967                 let mut has_update = false;
4968                 {
4969                         let mut channel_state_lock = self.channel_state.lock().unwrap();
4970                         let channel_state = &mut *channel_state_lock;
4971                         let by_id = &mut channel_state.by_id;
4972                         let short_to_id = &mut channel_state.short_to_id;
4973                         let pending_msg_events = &mut channel_state.pending_msg_events;
4974
4975                         by_id.retain(|channel_id, chan| {
4976                                 match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
4977                                         Ok((msg_opt, tx_opt)) => {
4978                                                 if let Some(msg) = msg_opt {
4979                                                         has_update = true;
4980                                                         pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
4981                                                                 node_id: chan.get_counterparty_node_id(), msg,
4982                                                         });
4983                                                 }
4984                                                 if let Some(tx) = tx_opt {
4985                                                         // We're done with this channel. We got a closing_signed and sent back
4986                                                         // a closing_signed with a closing transaction to broadcast.
4987                                                         if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
4988                                                                 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
4989                                                                         msg: update
4990                                                                 });
4991                                                         }
4992
4993                                                         self.issue_channel_close_events(chan, ClosureReason::CooperativeClosure);
4994
4995                                                         log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
4996                                                         self.tx_broadcaster.broadcast_transaction(&tx);
4997                                                         update_maps_on_chan_removal!(self, short_to_id, chan);
4998                                                         false
4999                                                 } else { true }
5000                                         },
5001                                         Err(e) => {
5002                                                 has_update = true;
5003                                                 let (close_channel, res) = convert_chan_err!(self, e, short_to_id, chan, channel_id);
5004                                                 handle_errors.push((chan.get_counterparty_node_id(), Err(res)));
5005                                                 !close_channel
5006                                         }
5007                                 }
5008                         });
5009                 }
5010
5011                 for (counterparty_node_id, err) in handle_errors.drain(..) {
5012                         let _ = handle_error!(self, err, counterparty_node_id);
5013                 }
5014
5015                 has_update
5016         }
5017
5018         /// Handle a list of channel failures during a block_connected or block_disconnected call,
5019         /// pushing the channel monitor update (if any) to the background events queue and removing the
5020         /// Channel object.
5021         fn handle_init_event_channel_failures(&self, mut failed_channels: Vec<ShutdownResult>) {
5022                 for mut failure in failed_channels.drain(..) {
5023                         // Either a commitment transaction has been confirmed on-chain or
5024                         // Channel::block_disconnected detected that the funding transaction has been
5025                         // reorganized out of the main chain.
5026                         // We cannot broadcast our latest local state via monitor update (as
5027                         // Channel::force_shutdown tries to make us do) as we may still be in initialization,
5028                         // so we track the update internally and handle it when the user next calls
5029                         // timer_tick_occurred, guaranteeing we're running normally.
5030                         if let Some((funding_txo, update)) = failure.0.take() {
5031                                 assert_eq!(update.updates.len(), 1);
5032                                 if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
5033                                         assert!(should_broadcast);
5034                                 } else { unreachable!(); }
5035                                 self.pending_background_events.lock().unwrap().push(BackgroundEvent::ClosingMonitorUpdate((funding_txo, update)));
5036                         }
5037                         self.finish_force_close_channel(failure);
5038                 }
5039         }
5040
5041         fn set_payment_hash_secret_map(&self, payment_hash: PaymentHash, payment_preimage: Option<PaymentPreimage>, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, APIError> {
5042                 assert!(invoice_expiry_delta_secs <= 60*60*24*365); // Sadly bitcoin timestamps are u32s, so panic before 2106
5043
5044                 if min_value_msat.is_some() && min_value_msat.unwrap() > MAX_VALUE_MSAT {
5045                         return Err(APIError::APIMisuseError { err: format!("min_value_msat of {} greater than total 21 million bitcoin supply", min_value_msat.unwrap()) });
5046                 }
5047
5048                 let payment_secret = PaymentSecret(self.keys_manager.get_secure_random_bytes());
5049
5050                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5051                 let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
5052                 match payment_secrets.entry(payment_hash) {
5053                         hash_map::Entry::Vacant(e) => {
5054                                 e.insert(PendingInboundPayment {
5055                                         payment_secret, min_value_msat, payment_preimage,
5056                                         user_payment_id: 0, // For compatibility with version 0.0.103 and earlier
5057                                         // We assume that highest_seen_timestamp is pretty close to the current time -
5058                                         // it's updated to the greatest header timestamp we've seen whenever we receive a
5059                                         // new block, and should never be more than two hours in the future.
5060                                         // Thus, we add two hours here as a buffer to ensure we absolutely
5061                                         // never fail a payment too early.
5062                                         // Note that we assume that received blocks have reasonably up-to-date
5063                                         // timestamps.
5064                                         expiry_time: self.highest_seen_timestamp.load(Ordering::Acquire) as u64 + invoice_expiry_delta_secs as u64 + 7200,
5065                                 });
5066                         },
5067                         hash_map::Entry::Occupied(_) => return Err(APIError::APIMisuseError { err: "Duplicate payment hash".to_owned() }),
5068                 }
5069                 Ok(payment_secret)
5070         }
5071
5072         /// Gets a payment secret and payment hash for use in an invoice given to a third party wishing
5073         /// to pay us.
5074         ///
5075         /// This differs from [`create_inbound_payment_for_hash`] only in that it generates the
5076         /// [`PaymentHash`] and [`PaymentPreimage`] for you.
5077         ///
5078         /// The [`PaymentPreimage`] will ultimately be returned to you in the [`PaymentReceived`], which
5079         /// will have the [`PaymentReceived::payment_preimage`] field filled in. That should then be
5080         /// passed directly to [`claim_funds`].
5081         ///
5082         /// See [`create_inbound_payment_for_hash`] for detailed documentation on behavior and requirements.
5083         ///
5084         /// Note that a malicious eavesdropper can intuit whether an inbound payment was created by
5085         /// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime.
5086         ///
5087         /// # Note
5088         ///
5089         /// If you register an inbound payment with this method, then serialize the `ChannelManager`, then
5090         /// deserialize it with a node running 0.0.103 and earlier, the payment will fail to be received.
5091         ///
5092         /// Errors if `min_value_msat` is greater than total bitcoin supply.
5093         ///
5094         /// [`claim_funds`]: Self::claim_funds
5095         /// [`PaymentReceived`]: events::Event::PaymentReceived
5096         /// [`PaymentReceived::payment_preimage`]: events::Event::PaymentReceived::payment_preimage
5097         /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
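             ///
             /// A minimal usage sketch (not compiled by rustdoc; `channel_manager` is a hypothetical,
             /// fully-initialized `ChannelManager`):
             ///
             /// ```ignore
             /// // Register an inbound payment of at least 10_000 msat, valid for one hour.
             /// let (payment_hash, payment_secret) = channel_manager
             ///     .create_inbound_payment(Some(10_000), 3600)
             ///     .expect("min_value_msat is far below the total bitcoin supply");
             /// // Hand `payment_hash` and `payment_secret` to your invoice-construction code.
             /// ```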
5098         pub fn create_inbound_payment(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), ()> {
5099                 inbound_payment::create(&self.inbound_payment_key, min_value_msat, invoice_expiry_delta_secs, &self.keys_manager, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
5100         }
5101
5102         /// Legacy version of [`create_inbound_payment`]. Use this method if you wish to share
5103         /// serialized state with LDK node(s) running 0.0.103 and earlier.
5104         ///
5105         /// May panic if `invoice_expiry_delta_secs` is greater than one year.
5106         ///
5107         /// # Note
5108         /// This method is deprecated and will be removed soon.
5109         ///
5110         /// [`create_inbound_payment`]: Self::create_inbound_payment
5111         #[deprecated]
5112         pub fn create_inbound_payment_legacy(&self, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<(PaymentHash, PaymentSecret), APIError> {
5113                 let payment_preimage = PaymentPreimage(self.keys_manager.get_secure_random_bytes());
5114                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
5115                 let payment_secret = self.set_payment_hash_secret_map(payment_hash, Some(payment_preimage), min_value_msat, invoice_expiry_delta_secs)?;
5116                 Ok((payment_hash, payment_secret))
5117         }
5118
5119         /// Gets a [`PaymentSecret`] for a given [`PaymentHash`], for which the payment preimage is
5120         /// stored external to LDK.
5121         ///
5122         /// A [`PaymentReceived`] event will only be generated if the [`PaymentSecret`] matches a
5123         /// payment secret fetched via this method or [`create_inbound_payment`], and which is at least
5124         /// the `min_value_msat` provided here, if one is provided.
5125         ///
5126         /// The [`PaymentHash`] (and corresponding [`PaymentPreimage`]) should be globally unique, though
5127         /// note that LDK will not stop you from registering duplicate payment hashes for inbound
5128         /// payments.
5129         ///
5130         /// `min_value_msat` should be set if the invoice being generated contains a value. Any payment
5131         /// received for the returned [`PaymentHash`] will be required to be at least `min_value_msat`
5132         /// before a [`PaymentReceived`] event will be generated, ensuring that we do not provide the
5133         /// sender "proof-of-payment" unless they have paid the required amount.
5134         ///
5135         /// `invoice_expiry_delta_secs` describes the number of seconds that the invoice is valid for
5136         /// in excess of the current time. This should roughly match the expiry time set in the invoice.
5137         /// After this many seconds, we will remove the inbound payment, resulting in any attempts to
5138         /// pay the invoice failing. The BOLT spec suggests 3,600 secs as a default validity time for
5139         /// invoices when no timeout is set.
5140         ///
5141         /// Note that we use block header time to time-out pending inbound payments (with some margin
5142         /// to compensate for the inaccuracy of block header timestamps). Thus, in practice we will
5143         /// accept a payment and generate a [`PaymentReceived`] event for some time after the expiry.
5144         /// If you need exact expiry semantics, you should enforce them upon receipt of
5145         /// [`PaymentReceived`].
5146         ///
5147         /// Note that invoices generated for inbound payments should have their `min_final_cltv_expiry`
5148         /// set to at least [`MIN_FINAL_CLTV_EXPIRY`].
5149         ///
5150         /// Note that a malicious eavesdropper can intuit whether an inbound payment was created by
5151         /// `create_inbound_payment` or `create_inbound_payment_for_hash` based on runtime.
5152         ///
5153         /// # Note
5154         ///
5155         /// If you register an inbound payment with this method, then serialize the `ChannelManager`, then
5156         /// deserialize it with a node running 0.0.103 and earlier, the payment will fail to be received.
5157         ///
5158         /// Errors if `min_value_msat` is greater than total bitcoin supply.
5159         ///
5160         /// [`create_inbound_payment`]: Self::create_inbound_payment
5161         /// [`PaymentReceived`]: events::Event::PaymentReceived
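             ///
             /// A minimal usage sketch (not compiled by rustdoc; `channel_manager` and the externally-stored
             /// `payment_preimage` are hypothetical):
             ///
             /// ```ignore
             /// // Only the hash of the externally-held preimage is registered with LDK.
             /// let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0).into_inner());
             /// let payment_secret = channel_manager
             ///     .create_inbound_payment_for_hash(payment_hash, Some(10_000), 3600)
             ///     .expect("min_value_msat is far below the total bitcoin supply");
             /// // Include `payment_hash` and `payment_secret` in the invoice handed to the payer.
             /// ```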
5162         pub fn create_inbound_payment_for_hash(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, ()> {
5163                 inbound_payment::create_from_hash(&self.inbound_payment_key, min_value_msat, payment_hash, invoice_expiry_delta_secs, self.highest_seen_timestamp.load(Ordering::Acquire) as u64)
5164         }
5165
5166         /// Legacy version of [`create_inbound_payment_for_hash`]. Use this method if you wish to share
5167         /// serialized state with LDK node(s) running 0.0.103 and earlier.
5168         ///
5169         /// May panic if `invoice_expiry_delta_secs` is greater than one year.
5170         ///
5171         /// # Note
5172         /// This method is deprecated and will be removed soon.
5173         ///
5174         /// [`create_inbound_payment_for_hash`]: Self::create_inbound_payment_for_hash
5175         #[deprecated]
5176         pub fn create_inbound_payment_for_hash_legacy(&self, payment_hash: PaymentHash, min_value_msat: Option<u64>, invoice_expiry_delta_secs: u32) -> Result<PaymentSecret, APIError> {
5177                 self.set_payment_hash_secret_map(payment_hash, None, min_value_msat, invoice_expiry_delta_secs)
5178         }
5179
5180         /// Gets an LDK-generated payment preimage from a payment hash and payment secret that were
5181         /// previously returned from [`create_inbound_payment`].
5182         ///
5183         /// [`create_inbound_payment`]: Self::create_inbound_payment
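             ///
             /// A small sketch (not compiled by rustdoc; `channel_manager`, `payment_hash` and
             /// `payment_secret` are assumed to come from an earlier [`create_inbound_payment`] call):
             ///
             /// ```ignore
             /// let payment_preimage = channel_manager
             ///     .get_payment_preimage(payment_hash, payment_secret)
             ///     .expect("hash/secret pair was generated by create_inbound_payment");
             /// ```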
5184         pub fn get_payment_preimage(&self, payment_hash: PaymentHash, payment_secret: PaymentSecret) -> Result<PaymentPreimage, APIError> {
5185                 inbound_payment::get_payment_preimage(payment_hash, payment_secret, &self.inbound_payment_key)
5186         }
5187
5188         /// Gets a fake short channel id for use in receiving [phantom node payments]. These fake scids
5189         /// are used when constructing the phantom invoice's route hints.
5190         ///
5191         /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
5192         pub fn get_phantom_scid(&self) -> u64 {
5193                 let mut channel_state = self.channel_state.lock().unwrap();
5194                 let best_block = self.best_block.read().unwrap();
5195                 loop {
5196                         let scid_candidate = fake_scid::Namespace::Phantom.get_fake_scid(best_block.height(), &self.genesis_hash, &self.fake_scid_rand_bytes, &self.keys_manager);
5197                         // Ensure the generated scid doesn't conflict with a real channel.
5198                         match channel_state.short_to_id.entry(scid_candidate) {
5199                                 hash_map::Entry::Occupied(_) => continue,
5200                                 hash_map::Entry::Vacant(_) => return scid_candidate
5201                         }
5202                 }
5203         }
5204
5205         /// Gets route hints for use in receiving [phantom node payments].
5206         ///
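        /// A minimal sketch, assuming `node_a` and `node_b` are [`ChannelManager`]s configured for
        /// [phantom node payments] (i.e. sharing the required phantom key material):
        ///
        /// ```ignore
        /// // Gather one set of hints per real node that can claim the phantom payment; the hints
        /// // are then handed to whatever constructs the phantom invoice.
        /// let phantom_hints = vec![
        ///     node_a.get_phantom_route_hints(),
        ///     node_b.get_phantom_route_hints(),
        /// ];
        /// ```
        ///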
5207         /// [phantom node payments]: crate::chain::keysinterface::PhantomKeysManager
5208         pub fn get_phantom_route_hints(&self) -> PhantomRouteHints {
5209                 PhantomRouteHints {
5210                         channels: self.list_usable_channels(),
5211                         phantom_scid: self.get_phantom_scid(),
5212                         real_node_pubkey: self.get_our_node_id(),
5213                 }
5214         }
5215
5216         #[cfg(any(test, fuzzing, feature = "_test_utils"))]
5217         pub fn get_and_clear_pending_events(&self) -> Vec<events::Event> {
5218                 let events = core::cell::RefCell::new(Vec::new());
5219                 let event_handler = |event: &events::Event| events.borrow_mut().push(event.clone());
5220                 self.process_pending_events(&event_handler);
5221                 events.into_inner()
5222         }
5223
5224         #[cfg(test)]
5225         pub fn has_pending_payments(&self) -> bool {
5226                 !self.pending_outbound_payments.lock().unwrap().is_empty()
5227         }
5228
5229         #[cfg(test)]
5230         pub fn clear_pending_payments(&self) {
5231                 self.pending_outbound_payments.lock().unwrap().clear()
5232         }
5233 }
5234
5235 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> MessageSendEventsProvider for ChannelManager<Signer, M, T, K, F, L>
5236         where M::Target: chain::Watch<Signer>,
5237         T::Target: BroadcasterInterface,
5238         K::Target: KeysInterface<Signer = Signer>,
5239         F::Target: FeeEstimator,
5240         L::Target: Logger,
5241 {
5242         fn get_and_clear_pending_msg_events(&self) -> Vec<MessageSendEvent> {
5243                 let events = RefCell::new(Vec::new());
5244                 PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
5245                         let mut result = NotifyOption::SkipPersist;
5246
5247                         // TODO: This behavior should be documented. It's unintuitive that we query
5248                         // ChannelMonitors when clearing other events.
5249                         if self.process_pending_monitor_events() {
5250                                 result = NotifyOption::DoPersist;
5251                         }
5252
5253                         if self.check_free_holding_cells() {
5254                                 result = NotifyOption::DoPersist;
5255                         }
5256                         if self.maybe_generate_initial_closing_signed() {
5257                                 result = NotifyOption::DoPersist;
5258                         }
5259
5260                         let mut pending_events = Vec::new();
5261                         let mut channel_state = self.channel_state.lock().unwrap();
5262                         mem::swap(&mut pending_events, &mut channel_state.pending_msg_events);
5263
5264                         if !pending_events.is_empty() {
5265                                 events.replace(pending_events);
5266                         }
5267
5268                         result
5269                 });
5270                 events.into_inner()
5271         }
5272 }
5273
5274 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> EventsProvider for ChannelManager<Signer, M, T, K, F, L>
5275 where
5276         M::Target: chain::Watch<Signer>,
5277         T::Target: BroadcasterInterface,
5278         K::Target: KeysInterface<Signer = Signer>,
5279         F::Target: FeeEstimator,
5280         L::Target: Logger,
5281 {
5282         /// Processes events that must be periodically handled.
5283         ///
5284         /// An [`EventHandler`] may safely call back to the provider in order to handle an event.
5285         /// However, it must not call [`Writeable::write`] as doing so would result in a deadlock.
5286         ///
5287         /// Pending events are persisted as part of [`ChannelManager`]. While these events are cleared
5288         /// when processed, an [`EventHandler`] must be able to handle previously seen events when
5289         /// restarting from an old state.
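        ///
        /// A minimal sketch of a handler, mirroring the collection pattern used by this crate's
        /// test helpers; `channel_manager` is assumed to be an initialized `ChannelManager` and
        /// `Event` to be imported from `lightning::util::events`:
        ///
        /// ```ignore
        /// let events = core::cell::RefCell::new(Vec::new());
        /// let event_handler = |event: &Event| events.borrow_mut().push(event.clone());
        /// channel_manager.process_pending_events(&event_handler);
        /// // `events` now holds whatever was pending; handle or persist the entries as needed.
        /// ```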
5290         fn process_pending_events<H: Deref>(&self, handler: H) where H::Target: EventHandler {
5291                 PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
5292                         let mut result = NotifyOption::SkipPersist;
5293
5294                         // TODO: This behavior should be documented. It's unintuitive that we query
5295                         // ChannelMonitors when clearing other events.
5296                         if self.process_pending_monitor_events() {
5297                                 result = NotifyOption::DoPersist;
5298                         }
5299
5300                         let mut pending_events = mem::replace(&mut *self.pending_events.lock().unwrap(), vec![]);
5301                         if !pending_events.is_empty() {
5302                                 result = NotifyOption::DoPersist;
5303                         }
5304
5305                         for event in pending_events.drain(..) {
5306                                 handler.handle_event(&event);
5307                         }
5308
5309                         result
5310                 });
5311         }
5312 }
5313
5314 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Listen for ChannelManager<Signer, M, T, K, F, L>
5315 where
5316         M::Target: chain::Watch<Signer>,
5317         T::Target: BroadcasterInterface,
5318         K::Target: KeysInterface<Signer = Signer>,
5319         F::Target: FeeEstimator,
5320         L::Target: Logger,
5321 {
5322         fn filtered_block_connected(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
5323                 {
5324                         let best_block = self.best_block.read().unwrap();
5325                         assert_eq!(best_block.block_hash(), header.prev_blockhash,
5326                                 "Blocks must be connected in chain-order - the connected header must build on the last connected header");
5327                         assert_eq!(best_block.height(), height - 1,
5328                                 "Blocks must be connected in chain-order - the connected block height must be one greater than the previous height");
5329                 }
5330
5331                 self.transactions_confirmed(header, txdata, height);
5332                 self.best_block_updated(header, height);
5333         }
5334
5335         fn block_disconnected(&self, header: &BlockHeader, height: u32) {
5336                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5337                 let new_height = height - 1;
5338                 {
5339                         let mut best_block = self.best_block.write().unwrap();
5340                         assert_eq!(best_block.block_hash(), header.block_hash(),
5341                                 "Blocks must be disconnected in chain-order - the disconnected header must be the last connected header");
5342                         assert_eq!(best_block.height(), height,
5343                                 "Blocks must be disconnected in chain-order - the disconnected block must have the correct height");
5344                         *best_block = BestBlock::new(header.prev_blockhash, new_height)
5345                 }
5346
5347                 self.do_chain_event(Some(new_height), |channel| channel.best_block_updated(new_height, header.time, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
5348         }
5349 }
5350
5351 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> chain::Confirm for ChannelManager<Signer, M, T, K, F, L>
5352 where
5353         M::Target: chain::Watch<Signer>,
5354         T::Target: BroadcasterInterface,
5355         K::Target: KeysInterface<Signer = Signer>,
5356         F::Target: FeeEstimator,
5357         L::Target: Logger,
5358 {
5359         fn transactions_confirmed(&self, header: &BlockHeader, txdata: &TransactionData, height: u32) {
5360                 // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
5361                 // during initialization prior to the chain_monitor being fully configured in some cases.
5362                 // See the docs for `ChannelManagerReadArgs` for more.
5363
5364                 let block_hash = header.block_hash();
5365                 log_trace!(self.logger, "{} transactions included in block {} at height {} provided", txdata.len(), block_hash, height);
5366
5367                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5368                 self.do_chain_event(Some(height), |channel| channel.transactions_confirmed(&block_hash, height, txdata, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger)
5369                         .map(|(a, b)| (a, Vec::new(), b)));
5370
5371                 let last_best_block_height = self.best_block.read().unwrap().height();
5372                 if height < last_best_block_height {
5373                         let timestamp = self.highest_seen_timestamp.load(Ordering::Acquire);
5374                         self.do_chain_event(Some(last_best_block_height), |channel| channel.best_block_updated(last_best_block_height, timestamp as u32, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
5375                 }
5376         }
5377
5378         fn best_block_updated(&self, header: &BlockHeader, height: u32) {
5379                 // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
5380                 // during initialization prior to the chain_monitor being fully configured in some cases.
5381                 // See the docs for `ChannelManagerReadArgs` for more.
5382
5383                 let block_hash = header.block_hash();
5384                 log_trace!(self.logger, "New best block: {} at height {}", block_hash, height);
5385
5386                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5387
5388                 *self.best_block.write().unwrap() = BestBlock::new(block_hash, height);
5389
5390                 self.do_chain_event(Some(height), |channel| channel.best_block_updated(height, header.time, self.genesis_hash.clone(), self.get_our_node_id(), &self.logger));
5391
5392                 macro_rules! max_time {
5393                         ($timestamp: expr) => {
5394                                 loop {
5395                                         // Update $timestamp to be the max of its current value and the block
5396                                         // timestamp. This should keep us close to the current time without relying on
5397                                         // having an explicit local time source.
5398                                         // Just in case we end up in a race, we loop until we either successfully
5399                                         // update $timestamp or decide we don't need to.
5400                                         let old_serial = $timestamp.load(Ordering::Acquire);
5401                                         if old_serial >= header.time as usize { break; }
5402                                         if $timestamp.compare_exchange(old_serial, header.time as usize, Ordering::AcqRel, Ordering::Relaxed).is_ok() {
5403                                                 break;
5404                                         }
5405                                 }
5406                         }
5407                 }
5408                 max_time!(self.last_node_announcement_serial);
5409                 max_time!(self.highest_seen_timestamp);
5410                 let mut payment_secrets = self.pending_inbound_payments.lock().unwrap();
5411                 payment_secrets.retain(|_, inbound_payment| {
5412                         inbound_payment.expiry_time > header.time as u64
5413                 });
5414
5415                 let mut outbounds = self.pending_outbound_payments.lock().unwrap();
5416                 let mut pending_events = self.pending_events.lock().unwrap();
5417                 outbounds.retain(|payment_id, payment| {
5418                         if payment.remaining_parts() != 0 { return true }
5419                         if let PendingOutboundPayment::Retryable { starting_block_height, payment_hash, .. } = payment {
5420                                 if *starting_block_height + PAYMENT_EXPIRY_BLOCKS <= height {
5421                                         log_info!(self.logger, "Timing out payment with id {} and hash {}", log_bytes!(payment_id.0), log_bytes!(payment_hash.0));
5422                                         pending_events.push(events::Event::PaymentFailed {
5423                                                 payment_id: *payment_id, payment_hash: *payment_hash,
5424                                         });
5425                                         false
5426                                 } else { true }
5427                         } else { true }
5428                 });
5429         }
5430
5431         fn get_relevant_txids(&self) -> Vec<Txid> {
5432                 let channel_state = self.channel_state.lock().unwrap();
5433                 let mut res = Vec::with_capacity(channel_state.short_to_id.len());
5434                 for chan in channel_state.by_id.values() {
5435                         if let Some(funding_txo) = chan.get_funding_txo() {
5436                                 res.push(funding_txo.txid);
5437                         }
5438                 }
5439                 res
5440         }
5441
5442         fn transaction_unconfirmed(&self, txid: &Txid) {
5443                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5444                 self.do_chain_event(None, |channel| {
5445                         if let Some(funding_txo) = channel.get_funding_txo() {
5446                                 if funding_txo.txid == *txid {
5447                                         channel.funding_transaction_unconfirmed(&self.logger).map(|()| (None, Vec::new(), None))
5448                                 } else { Ok((None, Vec::new(), None)) }
5449                         } else { Ok((None, Vec::new(), None)) }
5450                 });
5451         }
5452 }
5453
5454 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<Signer, M, T, K, F, L>
5455 where
5456         M::Target: chain::Watch<Signer>,
5457         T::Target: BroadcasterInterface,
5458         K::Target: KeysInterface<Signer = Signer>,
5459         F::Target: FeeEstimator,
5460         L::Target: Logger,
5461 {
5462         /// Calls a function which handles an on-chain event (blocks dis/connected, transactions
5463         /// un/confirmed, etc) on each channel, handling any resulting errors or messages generated by
5464         /// the function.
5465         fn do_chain_event<FN: Fn(&mut Channel<Signer>) -> Result<(Option<msgs::FundingLocked>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>>
5466                         (&self, height_opt: Option<u32>, f: FN) {
5467                 // Note that we MUST NOT end up calling methods on self.chain_monitor here - we're called
5468                 // during initialization prior to the chain_monitor being fully configured in some cases.
5469                 // See the docs for `ChannelManagerReadArgs` for more.
5470
5471                 let mut failed_channels = Vec::new();
5472                 let mut timed_out_htlcs = Vec::new();
5473                 {
5474                         let mut channel_lock = self.channel_state.lock().unwrap();
5475                         let channel_state = &mut *channel_lock;
5476                         let short_to_id = &mut channel_state.short_to_id;
5477                         let pending_msg_events = &mut channel_state.pending_msg_events;
5478                         channel_state.by_id.retain(|_, channel| {
5479                                 let res = f(channel);
5480                                 if let Ok((funding_locked_opt, mut timed_out_pending_htlcs, announcement_sigs)) = res {
5481                                         for (source, payment_hash) in timed_out_pending_htlcs.drain(..) {
5482                                                 let (failure_code, data) = self.get_htlc_inbound_temp_fail_err_and_data(0x1000|14 /* expiry_too_soon */, &channel);
5483                                                 timed_out_htlcs.push((source, payment_hash, HTLCFailReason::Reason {
5484                                                         failure_code, data,
5485                                                 }));
5486                                         }
5487                                         if let Some(funding_locked) = funding_locked_opt {
5488                                                 send_funding_locked!(short_to_id, pending_msg_events, channel, funding_locked);
5489                                                 if channel.is_usable() {
5490                                                         log_trace!(self.logger, "Sending funding_locked with private initial channel_update for our counterparty on channel {}", log_bytes!(channel.channel_id()));
5491                                                         if let Ok(msg) = self.get_channel_update_for_unicast(channel) {
5492                                                                 pending_msg_events.push(events::MessageSendEvent::SendChannelUpdate {
5493                                                                         node_id: channel.get_counterparty_node_id(),
5494                                                                         msg,
5495                                                                 });
5496                                                         }
5497                                                 } else {
5498                                                         log_trace!(self.logger, "Sending funding_locked WITHOUT channel_update for {}", log_bytes!(channel.channel_id()));
5499                                                 }
5500                                         }
5501                                         if let Some(announcement_sigs) = announcement_sigs {
5502                                                 log_trace!(self.logger, "Sending announcement_signatures for channel {}", log_bytes!(channel.channel_id()));
5503                                                 pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
5504                                                         node_id: channel.get_counterparty_node_id(),
5505                                                         msg: announcement_sigs,
5506                                                 });
5507                                                 if let Some(height) = height_opt {
5508                                                         if let Some(announcement) = channel.get_signed_channel_announcement(self.get_our_node_id(), self.genesis_hash, height) {
5509                                                                 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelAnnouncement {
5510                                                                         msg: announcement,
5511                                                                         // Note that announcement_signatures fails if the channel cannot be announced,
5512                                                                         // so get_channel_update_for_broadcast will never fail by the time we get here.
5513                                                                         update_msg: self.get_channel_update_for_broadcast(channel).unwrap(),
5514                                                                 });
5515                                                         }
5516                                                 }
5517                                         }
5518                                 } else if let Err(reason) = res {
5519                                         update_maps_on_chan_removal!(self, short_to_id, channel);
5520                                         // It looks like our counterparty went on-chain or the funding transaction was
5521                                         // reorged out of the main chain. Close the channel.
5522                                         failed_channels.push(channel.force_shutdown(true));
5523                                         if let Ok(update) = self.get_channel_update_for_broadcast(&channel) {
5524                                                 pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
5525                                                         msg: update
5526                                                 });
5527                                         }
5528                                         let reason_message = format!("{}", reason);
5529                                         self.issue_channel_close_events(channel, reason);
5530                                         pending_msg_events.push(events::MessageSendEvent::HandleError {
5531                                                 node_id: channel.get_counterparty_node_id(),
5532                                                 action: msgs::ErrorAction::SendErrorMessage { msg: msgs::ErrorMessage {
5533                                                         channel_id: channel.channel_id(),
5534                                                         data: reason_message,
5535                                                 } },
5536                                         });
5537                                         return false;
5538                                 }
5539                                 true
5540                         });
5541
5542                         if let Some(height) = height_opt {
5543                                 channel_state.claimable_htlcs.retain(|payment_hash, htlcs| {
5544                                         htlcs.retain(|htlc| {
5545                                                 // If height is approaching the number of blocks we think it takes us to get
5546                                                 // our commitment transaction confirmed before the HTLC expires, plus the
5547                                                 // number of blocks we generally consider it to take to do a commitment update,
5548                                                 // just give up on it and fail the HTLC.
5549                                                 if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
5550                                                         let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
5551                                                         htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
5552                                                         timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
5553                                                                 failure_code: 0x4000 | 15,
5554                                                                 data: htlc_msat_height_data
5555                                                         }));
5556                                                         false
5557                                                 } else { true }
5558                                         });
5559                                         !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
5560                                 });
5561                         }
5562                 }
5563
5564                 self.handle_init_event_channel_failures(failed_channels);
5565
5566                 for (source, payment_hash, reason) in timed_out_htlcs.drain(..) {
5567                         self.fail_htlc_backwards_internal(self.channel_state.lock().unwrap(), source, &payment_hash, reason);
5568                 }
5569         }
5570
5571         /// Blocks until ChannelManager needs to be persisted or a timeout is reached. It returns a bool
5572         /// indicating whether persistence is necessary. Only one listener on
5573         /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
5574         /// up.
5575         ///
5576         /// Note that this method is not available with the `no-std` feature.
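        ///
        /// A minimal sketch of a persistence loop; `channel_manager` is assumed to be an
        /// initialized `ChannelManager` and `persist` a hypothetical helper that writes it out:
        ///
        /// ```ignore
        /// loop {
        ///     // Wake up at least once per second; persist only when something actually changed.
        ///     if channel_manager.await_persistable_update_timeout(Duration::from_secs(1)) {
        ///         persist(&channel_manager);
        ///     }
        /// }
        /// ```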
5577         #[cfg(any(test, feature = "std"))]
5578         pub fn await_persistable_update_timeout(&self, max_wait: Duration) -> bool {
5579                 self.persistence_notifier.wait_timeout(max_wait)
5580         }
5581
5582         /// Blocks until ChannelManager needs to be persisted. Only one listener on
5583         /// `await_persistable_update` or `await_persistable_update_timeout` is guaranteed to be woken
5584         /// up.
5585         pub fn await_persistable_update(&self) {
5586                 self.persistence_notifier.wait()
5587         }
5588
5589         #[cfg(any(test, feature = "_test_utils"))]
5590         pub fn get_persistence_condvar_value(&self) -> bool {
5591                 let mutcond = &self.persistence_notifier.persistence_lock;
5592                 let &(ref mtx, _) = mutcond;
5593                 let guard = mtx.lock().unwrap();
5594                 *guard
5595         }
5596
5597         /// Gets the latest best block which was connected either via the [`chain::Listen`] or
5598         /// [`chain::Confirm`] interfaces.
5599         pub fn current_best_block(&self) -> BestBlock {
5600                 self.best_block.read().unwrap().clone()
5601         }
5602 }
5603
5604 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
5605         ChannelMessageHandler for ChannelManager<Signer, M, T, K, F, L>
5606         where M::Target: chain::Watch<Signer>,
5607         T::Target: BroadcasterInterface,
5608         K::Target: KeysInterface<Signer = Signer>,
5609         F::Target: FeeEstimator,
5610         L::Target: Logger,
5611 {
5612         fn handle_open_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::OpenChannel) {
5613                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5614                 let _ = handle_error!(self, self.internal_open_channel(counterparty_node_id, their_features, msg), *counterparty_node_id);
5615         }
5616
5617         fn handle_accept_channel(&self, counterparty_node_id: &PublicKey, their_features: InitFeatures, msg: &msgs::AcceptChannel) {
5618                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5619                 let _ = handle_error!(self, self.internal_accept_channel(counterparty_node_id, their_features, msg), *counterparty_node_id);
5620         }
5621
5622         fn handle_funding_created(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingCreated) {
5623                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5624                 let _ = handle_error!(self, self.internal_funding_created(counterparty_node_id, msg), *counterparty_node_id);
5625         }
5626
5627         fn handle_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) {
5628                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5629                 let _ = handle_error!(self, self.internal_funding_signed(counterparty_node_id, msg), *counterparty_node_id);
5630         }
5631
5632         fn handle_funding_locked(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingLocked) {
5633                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5634                 let _ = handle_error!(self, self.internal_funding_locked(counterparty_node_id, msg), *counterparty_node_id);
5635         }
5636
5637         fn handle_shutdown(&self, counterparty_node_id: &PublicKey, their_features: &InitFeatures, msg: &msgs::Shutdown) {
5638                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5639                 let _ = handle_error!(self, self.internal_shutdown(counterparty_node_id, their_features, msg), *counterparty_node_id);
5640         }
5641
5642         fn handle_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) {
5643                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5644                 let _ = handle_error!(self, self.internal_closing_signed(counterparty_node_id, msg), *counterparty_node_id);
5645         }
5646
5647         fn handle_update_add_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateAddHTLC) {
5648                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5649                 let _ = handle_error!(self, self.internal_update_add_htlc(counterparty_node_id, msg), *counterparty_node_id);
5650         }
5651
5652         fn handle_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) {
5653                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5654                 let _ = handle_error!(self, self.internal_update_fulfill_htlc(counterparty_node_id, msg), *counterparty_node_id);
5655         }
5656
5657         fn handle_update_fail_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailHTLC) {
5658                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5659                 let _ = handle_error!(self, self.internal_update_fail_htlc(counterparty_node_id, msg), *counterparty_node_id);
5660         }
5661
5662         fn handle_update_fail_malformed_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFailMalformedHTLC) {
5663                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5664                 let _ = handle_error!(self, self.internal_update_fail_malformed_htlc(counterparty_node_id, msg), *counterparty_node_id);
5665         }
5666
5667         fn handle_commitment_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::CommitmentSigned) {
5668                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5669                 let _ = handle_error!(self, self.internal_commitment_signed(counterparty_node_id, msg), *counterparty_node_id);
5670         }
5671
5672         fn handle_revoke_and_ack(&self, counterparty_node_id: &PublicKey, msg: &msgs::RevokeAndACK) {
5673                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5674                 let _ = handle_error!(self, self.internal_revoke_and_ack(counterparty_node_id, msg), *counterparty_node_id);
5675         }
5676
5677         fn handle_update_fee(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFee) {
5678                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5679                 let _ = handle_error!(self, self.internal_update_fee(counterparty_node_id, msg), *counterparty_node_id);
5680         }
5681
5682         fn handle_announcement_signatures(&self, counterparty_node_id: &PublicKey, msg: &msgs::AnnouncementSignatures) {
5683                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5684                 let _ = handle_error!(self, self.internal_announcement_signatures(counterparty_node_id, msg), *counterparty_node_id);
5685         }
5686
5687         fn handle_channel_update(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelUpdate) {
5688                 PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || {
5689                         if let Ok(persist) = handle_error!(self, self.internal_channel_update(counterparty_node_id, msg), *counterparty_node_id) {
5690                                 persist
5691                         } else {
5692                                 NotifyOption::SkipPersist
5693                         }
5694                 });
5695         }
5696
5697         fn handle_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) {
5698                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5699                 let _ = handle_error!(self, self.internal_channel_reestablish(counterparty_node_id, msg), *counterparty_node_id);
5700         }
5701
5702         fn peer_disconnected(&self, counterparty_node_id: &PublicKey, no_connection_possible: bool) {
5703                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5704                 let mut failed_channels = Vec::new();
5705                 let mut no_channels_remain = true;
5706                 {
5707                         let mut channel_state_lock = self.channel_state.lock().unwrap();
5708                         let channel_state = &mut *channel_state_lock;
5709                         let pending_msg_events = &mut channel_state.pending_msg_events;
5710                         let short_to_id = &mut channel_state.short_to_id;
5711                         log_debug!(self.logger, "Marking channels with {} disconnected and generating channel_updates. We believe we {} make future connections to this peer.",
5712                                 log_pubkey!(counterparty_node_id), if no_connection_possible { "cannot" } else { "can" });
5713                         channel_state.by_id.retain(|_, chan| {
5714                                 if chan.get_counterparty_node_id() == *counterparty_node_id {
5715                                         chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
5716                                         if chan.is_shutdown() {
5717                                                 update_maps_on_chan_removal!(self, short_to_id, chan);
5718                                                 self.issue_channel_close_events(chan, ClosureReason::DisconnectedPeer);
5719                                                 return false;
5720                                         } else {
5721                                                 no_channels_remain = false;
5722                                         }
5723                                 }
5724                                 true
5725                         });
5726                         pending_msg_events.retain(|msg| {
5727                                 match msg {
5728                                         &events::MessageSendEvent::SendAcceptChannel { ref node_id, .. } => node_id != counterparty_node_id,
5729                                         &events::MessageSendEvent::SendOpenChannel { ref node_id, .. } => node_id != counterparty_node_id,
5730                                         &events::MessageSendEvent::SendFundingCreated { ref node_id, .. } => node_id != counterparty_node_id,
5731                                         &events::MessageSendEvent::SendFundingSigned { ref node_id, .. } => node_id != counterparty_node_id,
5732                                         &events::MessageSendEvent::SendFundingLocked { ref node_id, .. } => node_id != counterparty_node_id,
5733                                         &events::MessageSendEvent::SendAnnouncementSignatures { ref node_id, .. } => node_id != counterparty_node_id,
5734                                         &events::MessageSendEvent::UpdateHTLCs { ref node_id, .. } => node_id != counterparty_node_id,
5735                                         &events::MessageSendEvent::SendRevokeAndACK { ref node_id, .. } => node_id != counterparty_node_id,
5736                                         &events::MessageSendEvent::SendClosingSigned { ref node_id, .. } => node_id != counterparty_node_id,
5737                                         &events::MessageSendEvent::SendShutdown { ref node_id, .. } => node_id != counterparty_node_id,
5738                                         &events::MessageSendEvent::SendChannelReestablish { ref node_id, .. } => node_id != counterparty_node_id,
5739                                         &events::MessageSendEvent::BroadcastChannelAnnouncement { .. } => true,
5740                                         &events::MessageSendEvent::BroadcastNodeAnnouncement { .. } => true,
5741                                         &events::MessageSendEvent::BroadcastChannelUpdate { .. } => true,
5742                                         &events::MessageSendEvent::SendChannelUpdate { ref node_id, .. } => node_id != counterparty_node_id,
5743                                         &events::MessageSendEvent::HandleError { ref node_id, .. } => node_id != counterparty_node_id,
5744                                         &events::MessageSendEvent::SendChannelRangeQuery { .. } => false,
5745                                         &events::MessageSendEvent::SendShortIdsQuery { .. } => false,
5746                                         &events::MessageSendEvent::SendReplyChannelRange { .. } => false,
5747                                         &events::MessageSendEvent::SendGossipTimestampFilter { .. } => false,
5748                                 }
5749                         });
5750                 }
5751                 if no_channels_remain {
5752                         self.per_peer_state.write().unwrap().remove(counterparty_node_id);
5753                 }
5754
5755                 for failure in failed_channels.drain(..) {
5756                         self.finish_force_close_channel(failure);
5757                 }
5758         }
5759
5760         fn peer_connected(&self, counterparty_node_id: &PublicKey, init_msg: &msgs::Init) {
5761                 log_debug!(self.logger, "Generating channel_reestablish events for {}", log_pubkey!(counterparty_node_id));
5762
5763                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5764
5765                 {
5766                         let mut peer_state_lock = self.per_peer_state.write().unwrap();
5767                         match peer_state_lock.entry(counterparty_node_id.clone()) {
5768                                 hash_map::Entry::Vacant(e) => {
5769                                         e.insert(Mutex::new(PeerState {
5770                                                 latest_features: init_msg.features.clone(),
5771                                         }));
5772                                 },
5773                                 hash_map::Entry::Occupied(e) => {
5774                                         e.get().lock().unwrap().latest_features = init_msg.features.clone();
5775                                 },
5776                         }
5777                 }
5778
5779                 let mut channel_state_lock = self.channel_state.lock().unwrap();
5780                 let channel_state = &mut *channel_state_lock;
5781                 let pending_msg_events = &mut channel_state.pending_msg_events;
5782                 channel_state.by_id.retain(|_, chan| {
5783                         if chan.get_counterparty_node_id() == *counterparty_node_id {
5784                                 if !chan.have_received_message() {
5785                                         // If we created this (outbound) channel while we were disconnected from the
5786                                         // peer we probably failed to send the open_channel message, which is now
5787                                         // lost. We can't have had anything pending related to this channel, so we just
5788                                         // drop it.
5789                                         false
5790                                 } else {
5791                                         pending_msg_events.push(events::MessageSendEvent::SendChannelReestablish {
5792                                                 node_id: chan.get_counterparty_node_id(),
5793                                                 msg: chan.get_channel_reestablish(&self.logger),
5794                                         });
5795                                         true
5796                                 }
5797                         } else { true }
5798                 });
5799                 //TODO: Also re-broadcast announcement_signatures
5800         }
5801
5802         fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
5803                 let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
5804
5805                 if msg.channel_id == [0; 32] {
5806                         for chan in self.list_channels() {
5807                                 if chan.counterparty.node_id == *counterparty_node_id {
5808                                         // Untrusted messages from peer; we throw away the error if the id points to a non-existent channel
5809                                         let _ = self.force_close_channel_with_peer(&chan.channel_id, counterparty_node_id, Some(&msg.data));
5810                                 }
5811                         }
5812                 } else {
5813                         {
5814                                 // First check if we can advance the channel type and try again.
5815                                 let mut channel_state = self.channel_state.lock().unwrap();
5816                                 if let Some(chan) = channel_state.by_id.get_mut(&msg.channel_id) {
5817                                         if chan.get_counterparty_node_id() != *counterparty_node_id {
5818                                                 return;
5819                                         }
5820                                         if let Ok(msg) = chan.maybe_handle_error_without_close(self.genesis_hash) {
5821                                                 channel_state.pending_msg_events.push(events::MessageSendEvent::SendOpenChannel {
5822                                                         node_id: *counterparty_node_id,
5823                                                         msg,
5824                                                 });
5825                                                 return;
5826                                         }
5827                                 }
5828                         }
5829
5830                         // Untrusted messages from peer; we throw away the error if the id points to a non-existent channel
5831                         let _ = self.force_close_channel_with_peer(&msg.channel_id, counterparty_node_id, Some(&msg.data));
5832                 }
5833         }
5834 }
5835
5836 /// Used to signal to the ChannelManager persister that the manager needs to be re-persisted to
5837 /// disk/backups, through `await_persistable_update_timeout` and `await_persistable_update`.
5838 struct PersistenceNotifier {
5839         /// Users won't access the persistence_lock directly, but rather wait on its bool using
5840         /// `wait_timeout` and `wait`.
5841         persistence_lock: (Mutex<bool>, Condvar),
5842 }
5843
5844 impl PersistenceNotifier {
5845         fn new() -> Self {
5846                 Self {
5847                         persistence_lock: (Mutex::new(false), Condvar::new()),
5848                 }
5849         }
5850
5851         fn wait(&self) {
5852                 loop {
5853                         let &(ref mtx, ref cvar) = &self.persistence_lock;
5854                         let mut guard = mtx.lock().unwrap();
5855                         if *guard {
5856                                 *guard = false;
5857                                 return;
5858                         }
5859                         guard = cvar.wait(guard).unwrap();
5860                         let result = *guard;
5861                         if result {
5862                                 *guard = false;
5863                                 return
5864                         }
5865                 }
5866         }
5867
5868         #[cfg(any(test, feature = "std"))]
5869         fn wait_timeout(&self, max_wait: Duration) -> bool {
5870                 let current_time = Instant::now();
5871                 loop {
5872                         let &(ref mtx, ref cvar) = &self.persistence_lock;
5873                         let mut guard = mtx.lock().unwrap();
5874                         if *guard {
5875                                 *guard = false;
5876                                 return true;
5877                         }
5878                         guard = cvar.wait_timeout(guard, max_wait).unwrap().0;
5879                         // Due to spurious wakeups that can happen on `wait_timeout`, here we need to check if the
5880                         // desired wait time has actually passed and, if it hasn't, restart the loop and wait
5881                         // again, re-checking the total elapsed time against `max_wait`. Note that this logic can
5882                         // be highly simplified through the use of `Condvar::wait_while` and
5883                         // `Condvar::wait_timeout_while`, if and when our MSRV is raised to 1.42.0.
5884                         let elapsed = current_time.elapsed();
5885                         let result = *guard;
5886                         if result || elapsed >= max_wait {
5887                                 *guard = false;
5888                                 return result;
5889                         }
5890                         match max_wait.checked_sub(elapsed) {
5891                                 None => return result,
5892                                 Some(_) => continue
5893                         }
5894                 }
5895         }
5896
5897         // Signal to the ChannelManager persister that there are updates necessitating persisting to disk.
5898         fn notify(&self) {
5899                 let &(ref persist_mtx, ref cnd) = &self.persistence_lock;
5900                 let mut persistence_lock = persist_mtx.lock().unwrap();
5901                 *persistence_lock = true;
5902                 mem::drop(persistence_lock);
5903                 cnd.notify_all();
5904         }
5905 }
5906
5907 const SERIALIZATION_VERSION: u8 = 1;
5908 const MIN_SERIALIZATION_VERSION: u8 = 1;
5909
5910 impl_writeable_tlv_based!(CounterpartyForwardingInfo, {
5911         (2, fee_base_msat, required),
5912         (4, fee_proportional_millionths, required),
5913         (6, cltv_expiry_delta, required),
5914 });
5915
5916 impl_writeable_tlv_based!(ChannelCounterparty, {
5917         (2, node_id, required),
5918         (4, features, required),
5919         (6, unspendable_punishment_reserve, required),
5920         (8, forwarding_info, option),
5921         (9, outbound_htlc_minimum_msat, option),
5922         (11, outbound_htlc_maximum_msat, option),
5923 });
5924
5925 impl_writeable_tlv_based!(ChannelDetails, {
5926         (1, inbound_scid_alias, option),
5927         (2, channel_id, required),
5928         (3, channel_type, option),
5929         (4, counterparty, required),
5930         (6, funding_txo, option),
5931         (8, short_channel_id, option),
5932         (10, channel_value_satoshis, required),
5933         (12, unspendable_punishment_reserve, option),
5934         (14, user_channel_id, required),
5935         (16, balance_msat, required),
5936         (18, outbound_capacity_msat, required),
5937         // Note that by the time we get past the required read above, outbound_capacity_msat will be
5938         // filled in, so we can safely unwrap it here.
5939         (19, next_outbound_htlc_limit_msat, (default_value, outbound_capacity_msat.0.unwrap())),
5940         (20, inbound_capacity_msat, required),
5941         (22, confirmations_required, option),
5942         (24, force_close_spend_delay, option),
5943         (26, is_outbound, required),
5944         (28, is_funding_locked, required),
5945         (30, is_usable, required),
5946         (32, is_public, required),
5947         (33, inbound_htlc_minimum_msat, option),
5948         (35, inbound_htlc_maximum_msat, option),
5949 });
5950
5951 impl_writeable_tlv_based!(PhantomRouteHints, {
5952         (2, channels, vec_type),
5953         (4, phantom_scid, required),
5954         (6, real_node_pubkey, required),
5955 });
5956
5957 impl_writeable_tlv_based_enum!(PendingHTLCRouting,
5958         (0, Forward) => {
5959                 (0, onion_packet, required),
5960                 (2, short_channel_id, required),
5961         },
5962         (1, Receive) => {
5963                 (0, payment_data, required),
5964                 (1, phantom_shared_secret, option),
5965                 (2, incoming_cltv_expiry, required),
5966         },
5967         (2, ReceiveKeysend) => {
5968                 (0, payment_preimage, required),
5969                 (2, incoming_cltv_expiry, required),
5970         },
5971 ;);
5972
5973 impl_writeable_tlv_based!(PendingHTLCInfo, {
5974         (0, routing, required),
5975         (2, incoming_shared_secret, required),
5976         (4, payment_hash, required),
5977         (6, amt_to_forward, required),
5978         (8, outgoing_cltv_value, required)
5979 });
5980
5981
5982 impl Writeable for HTLCFailureMsg {
5983         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
5984                 match self {
5985                         HTLCFailureMsg::Relay(msgs::UpdateFailHTLC { channel_id, htlc_id, reason }) => {
5986                                 0u8.write(writer)?;
5987                                 channel_id.write(writer)?;
5988                                 htlc_id.write(writer)?;
5989                                 reason.write(writer)?;
5990                         },
5991                         HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
5992                                 channel_id, htlc_id, sha256_of_onion, failure_code
5993                         }) => {
5994                                 1u8.write(writer)?;
5995                                 channel_id.write(writer)?;
5996                                 htlc_id.write(writer)?;
5997                                 sha256_of_onion.write(writer)?;
5998                                 failure_code.write(writer)?;
5999                         },
6000                 }
6001                 Ok(())
6002         }
6003 }
6004
6005 impl Readable for HTLCFailureMsg {
6006         fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
6007                 let id: u8 = Readable::read(reader)?;
6008                 match id {
6009                         0 => {
6010                                 Ok(HTLCFailureMsg::Relay(msgs::UpdateFailHTLC {
6011                                         channel_id: Readable::read(reader)?,
6012                                         htlc_id: Readable::read(reader)?,
6013                                         reason: Readable::read(reader)?,
6014                                 }))
6015                         },
6016                         1 => {
6017                                 Ok(HTLCFailureMsg::Malformed(msgs::UpdateFailMalformedHTLC {
6018                                         channel_id: Readable::read(reader)?,
6019                                         htlc_id: Readable::read(reader)?,
6020                                         sha256_of_onion: Readable::read(reader)?,
6021                                         failure_code: Readable::read(reader)?,
6022                                 }))
6023                         },
6024                         // In versions prior to 0.0.101, HTLCFailureMsg objects were written with type 0 or 1 but
6025                         // weren't length-prefixed and thus didn't support reading the TLV stream suffix of the network
6026                         // messages contained in the variants.
6027                         // In version 0.0.101, support for reading the variants with these types was added, and
6028                         // we should migrate to writing these variants when UpdateFailHTLC or
6029                         // UpdateFailMalformedHTLC get TLV fields.
6030                         2 => {
6031                                 let length: BigSize = Readable::read(reader)?;
6032                                 let mut s = FixedLengthReader::new(reader, length.0);
6033                                 let res = Readable::read(&mut s)?;
6034                                 s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
6035                                 Ok(HTLCFailureMsg::Relay(res))
6036                         },
6037                         3 => {
6038                                 let length: BigSize = Readable::read(reader)?;
6039                                 let mut s = FixedLengthReader::new(reader, length.0);
6040                                 let res = Readable::read(&mut s)?;
6041                                 s.eat_remaining()?; // Return ShortRead if there's actually not enough bytes
6042                                 Ok(HTLCFailureMsg::Malformed(res))
6043                         },
6044                         _ => Err(DecodeError::UnknownRequiredFeature),
6045                 }
6046         }
6047 }
6048
6049 impl_writeable_tlv_based_enum!(PendingHTLCStatus, ;
6050         (0, Forward),
6051         (1, Fail),
6052 );
6053
6054 impl_writeable_tlv_based!(HTLCPreviousHopData, {
6055         (0, short_channel_id, required),
6056         (1, phantom_shared_secret, option),
6057         (2, outpoint, required),
6058         (4, htlc_id, required),
6059         (6, incoming_packet_shared_secret, required)
6060 });
6061
6062 impl Writeable for ClaimableHTLC {
6063         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6064                 let payment_data = match &self.onion_payload {
6065                         OnionPayload::Invoice { _legacy_hop_data } => Some(_legacy_hop_data),
6066                         _ => None,
6067                 };
6068                 let keysend_preimage = match self.onion_payload {
6069                         OnionPayload::Invoice { .. } => None,
6070                         OnionPayload::Spontaneous(preimage) => Some(preimage.clone()),
6071                 };
6072                 write_tlv_fields!(writer, {
6073                         (0, self.prev_hop, required),
6074                         (1, self.total_msat, required),
6075                         (2, self.value, required),
6076                         (4, payment_data, option),
6077                         (6, self.cltv_expiry, required),
6078                         (8, keysend_preimage, option),
6079                 });
6080                 Ok(())
6081         }
6082 }
6083
6084 impl Readable for ClaimableHTLC {
6085         fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
6086                 let mut prev_hop = ::util::ser::OptionDeserWrapper(None);
6087                 let mut value = 0;
6088                 let mut payment_data: Option<msgs::FinalOnionHopData> = None;
6089                 let mut cltv_expiry = 0;
6090                 let mut total_msat = None;
6091                 let mut keysend_preimage: Option<PaymentPreimage> = None;
6092                 read_tlv_fields!(reader, {
6093                         (0, prev_hop, required),
6094                         (1, total_msat, option),
6095                         (2, value, required),
6096                         (4, payment_data, option),
6097                         (6, cltv_expiry, required),
6098                         (8, keysend_preimage, option)
6099                 });
6100                 let onion_payload = match keysend_preimage {
6101                         Some(p) => {
6102                                 if payment_data.is_some() {
6103                                         return Err(DecodeError::InvalidValue)
6104                                 }
6105                                 if total_msat.is_none() {
6106                                         total_msat = Some(value);
6107                                 }
6108                                 OnionPayload::Spontaneous(p)
6109                         },
6110                         None => {
6111                                 if payment_data.is_none() {
6112                                         return Err(DecodeError::InvalidValue)
6113                                 }
6114                                 if total_msat.is_none() {
6115                                         total_msat = Some(payment_data.as_ref().unwrap().total_msat);
6116                                 }
6117                                 OnionPayload::Invoice { _legacy_hop_data: payment_data.unwrap() }
6118                         },
6119                 };
6120                 Ok(Self {
6121                         prev_hop: prev_hop.0.unwrap(),
6122                         timer_ticks: 0,
6123                         value,
6124                         total_msat: total_msat.unwrap(),
6125                         onion_payload,
6126                         cltv_expiry,
6127                 })
6128         }
6129 }
6130
6131 impl Readable for HTLCSource {
6132         fn read<R: Read>(reader: &mut R) -> Result<Self, DecodeError> {
6133                 let id: u8 = Readable::read(reader)?;
6134                 match id {
6135                         0 => {
6136                                 let mut session_priv: ::util::ser::OptionDeserWrapper<SecretKey> = ::util::ser::OptionDeserWrapper(None);
6137                                 let mut first_hop_htlc_msat: u64 = 0;
6138                                 let mut path = Some(Vec::new());
6139                                 let mut payment_id = None;
6140                                 let mut payment_secret = None;
6141                                 let mut payment_params = None;
6142                                 read_tlv_fields!(reader, {
6143                                         (0, session_priv, required),
6144                                         (1, payment_id, option),
6145                                         (2, first_hop_htlc_msat, required),
6146                                         (3, payment_secret, option),
6147                                         (4, path, vec_type),
6148                                         (5, payment_params, option),
6149                                 });
6150                                 if payment_id.is_none() {
6151                                         // For backwards compat, if there was no payment_id written, use the session_priv bytes
6152                                         // instead.
6153                                         payment_id = Some(PaymentId(*session_priv.0.unwrap().as_ref()));
6154                                 }
6155                                 Ok(HTLCSource::OutboundRoute {
6156                                         session_priv: session_priv.0.unwrap(),
6157                                         first_hop_htlc_msat: first_hop_htlc_msat,
6158                                         path: path.unwrap(),
6159                                         payment_id: payment_id.unwrap(),
6160                                         payment_secret,
6161                                         payment_params,
6162                                 })
6163                         }
6164                         1 => Ok(HTLCSource::PreviousHopData(Readable::read(reader)?)),
6165                         _ => Err(DecodeError::UnknownRequiredFeature),
6166                 }
6167         }
6168 }
6169
6170 impl Writeable for HTLCSource {
6171         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ::io::Error> {
6172                 match self {
6173                         HTLCSource::OutboundRoute { ref session_priv, ref first_hop_htlc_msat, ref path, payment_id, payment_secret, payment_params } => {
6174                                 0u8.write(writer)?;
6175                                 let payment_id_opt = Some(payment_id);
6176                                 write_tlv_fields!(writer, {
6177                                         (0, session_priv, required),
6178                                         (1, payment_id_opt, option),
6179                                         (2, first_hop_htlc_msat, required),
6180                                         (3, payment_secret, option),
6181                                         (4, path, vec_type),
6182                                         (5, payment_params, option),
6183                                  });
6184                         }
6185                         HTLCSource::PreviousHopData(ref field) => {
6186                                 1u8.write(writer)?;
6187                                 field.write(writer)?;
6188                         }
6189                 }
6190                 Ok(())
6191         }
6192 }
6193
6194 impl_writeable_tlv_based_enum!(HTLCFailReason,
6195         (0, LightningError) => {
6196                 (0, err, required),
6197         },
6198         (1, Reason) => {
6199                 (0, failure_code, required),
6200                 (2, data, vec_type),
6201         },
6202 ;);
6203
6204 impl_writeable_tlv_based_enum!(HTLCForwardInfo,
6205         (0, AddHTLC) => {
6206                 (0, forward_info, required),
6207                 (2, prev_short_channel_id, required),
6208                 (4, prev_htlc_id, required),
6209                 (6, prev_funding_outpoint, required),
6210         },
6211         (1, FailHTLC) => {
6212                 (0, htlc_id, required),
6213                 (2, err_packet, required),
6214         },
6215 ;);
6216
6217 impl_writeable_tlv_based!(PendingInboundPayment, {
6218         (0, payment_secret, required),
6219         (2, expiry_time, required),
6220         (4, user_payment_id, required),
6221         (6, payment_preimage, required),
6222         (8, min_value_msat, required),
6223 });
6224
6225 impl_writeable_tlv_based_enum_upgradable!(PendingOutboundPayment,
6226         (0, Legacy) => {
6227                 (0, session_privs, required),
6228         },
6229         (1, Fulfilled) => {
6230                 (0, session_privs, required),
6231                 (1, payment_hash, option),
6232         },
6233         (2, Retryable) => {
6234                 (0, session_privs, required),
6235                 (1, pending_fee_msat, option),
6236                 (2, payment_hash, required),
6237                 (4, payment_secret, option),
6238                 (6, total_msat, required),
6239                 (8, pending_amt_msat, required),
6240                 (10, starting_block_height, required),
6241         },
6242         (3, Abandoned) => {
6243                 (0, session_privs, required),
6244                 (2, payment_hash, required),
6245         },
6246 );
6247
6248 impl<Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> Writeable for ChannelManager<Signer, M, T, K, F, L>
6249         where M::Target: chain::Watch<Signer>,
6250         T::Target: BroadcasterInterface,
6251         K::Target: KeysInterface<Signer = Signer>,
6252         F::Target: FeeEstimator,
6253         L::Target: Logger,
6254 {
6255         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
6256                 let _consistency_lock = self.total_consistency_lock.write().unwrap();
6257
6258                 write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
6259
6260                 self.genesis_hash.write(writer)?;
6261                 {
6262                         let best_block = self.best_block.read().unwrap();
6263                         best_block.height().write(writer)?;
6264                         best_block.block_hash().write(writer)?;
6265                 }
6266
6267                 let channel_state = self.channel_state.lock().unwrap();
6268                 let mut unfunded_channels = 0;
6269                 for (_, channel) in channel_state.by_id.iter() {
6270                         if !channel.is_funding_initiated() {
6271                                 unfunded_channels += 1;
6272                         }
6273                 }
6274                 ((channel_state.by_id.len() - unfunded_channels) as u64).write(writer)?;
6275                 for (_, channel) in channel_state.by_id.iter() {
6276                         if channel.is_funding_initiated() {
6277                                 channel.write(writer)?;
6278                         }
6279                 }
6280
6281                 (channel_state.forward_htlcs.len() as u64).write(writer)?;
6282                 for (short_channel_id, pending_forwards) in channel_state.forward_htlcs.iter() {
6283                         short_channel_id.write(writer)?;
6284                         (pending_forwards.len() as u64).write(writer)?;
6285                         for forward in pending_forwards {
6286                                 forward.write(writer)?;
6287                         }
6288                 }
6289
6290                 (channel_state.claimable_htlcs.len() as u64).write(writer)?;
6291                 for (payment_hash, previous_hops) in channel_state.claimable_htlcs.iter() {
6292                         payment_hash.write(writer)?;
6293                         (previous_hops.len() as u64).write(writer)?;
6294                         for htlc in previous_hops.iter() {
6295                                 htlc.write(writer)?;
6296                         }
6297                 }
6298
6299                 let per_peer_state = self.per_peer_state.write().unwrap();
6300                 (per_peer_state.len() as u64).write(writer)?;
6301                 for (peer_pubkey, peer_state_mutex) in per_peer_state.iter() {
6302                         peer_pubkey.write(writer)?;
6303                         let peer_state = peer_state_mutex.lock().unwrap();
6304                         peer_state.latest_features.write(writer)?;
6305                 }
6306
6307                 let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
6308                 let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
6309                 let events = self.pending_events.lock().unwrap();
6310                 (events.len() as u64).write(writer)?;
6311                 for event in events.iter() {
6312                         event.write(writer)?;
6313                 }
6314
6315                 let background_events = self.pending_background_events.lock().unwrap();
6316                 (background_events.len() as u64).write(writer)?;
6317                 for event in background_events.iter() {
6318                         match event {
6319                                 BackgroundEvent::ClosingMonitorUpdate((funding_txo, monitor_update)) => {
6320                                         0u8.write(writer)?;
6321                                         funding_txo.write(writer)?;
6322                                         monitor_update.write(writer)?;
6323                                 },
6324                         }
6325                 }
6326
6327                 (self.last_node_announcement_serial.load(Ordering::Acquire) as u32).write(writer)?;
6328                 (self.highest_seen_timestamp.load(Ordering::Acquire) as u32).write(writer)?;
6329
6330                 (pending_inbound_payments.len() as u64).write(writer)?;
6331                 for (hash, pending_payment) in pending_inbound_payments.iter() {
6332                         hash.write(writer)?;
6333                         pending_payment.write(writer)?;
6334                 }
6335
6336                 // For backwards compat, write the session privs and their total length.
6337                 let mut num_pending_outbounds_compat: u64 = 0;
6338                 for (_, outbound) in pending_outbound_payments.iter() {
6339                         if !outbound.is_fulfilled() && !outbound.abandoned() {
6340                                 num_pending_outbounds_compat += outbound.remaining_parts() as u64;
6341                         }
6342                 }
6343                 num_pending_outbounds_compat.write(writer)?;
6344                 for (_, outbound) in pending_outbound_payments.iter() {
6345                         match outbound {
6346                                 PendingOutboundPayment::Legacy { session_privs } |
6347                                 PendingOutboundPayment::Retryable { session_privs, .. } => {
6348                                         for session_priv in session_privs.iter() {
6349                                                 session_priv.write(writer)?;
6350                                         }
6351                                 }
6352                                 PendingOutboundPayment::Fulfilled { .. } => {},
6353                                 PendingOutboundPayment::Abandoned { .. } => {},
6354                         }
6355                 }
6356
6357                 // Encode without retry info for 0.0.101 compatibility.
6358                 let mut pending_outbound_payments_no_retry: HashMap<PaymentId, HashSet<[u8; 32]>> = HashMap::new();
6359                 for (id, outbound) in pending_outbound_payments.iter() {
6360                         match outbound {
6361                                 PendingOutboundPayment::Legacy { session_privs } |
6362                                 PendingOutboundPayment::Retryable { session_privs, .. } => {
6363                                         pending_outbound_payments_no_retry.insert(*id, session_privs.clone());
6364                                 },
6365                                 _ => {},
6366                         }
6367                 }
6368                 write_tlv_fields!(writer, {
6369                         (1, pending_outbound_payments_no_retry, required),
6370                         (3, pending_outbound_payments, required),
6371                         (5, self.our_network_pubkey, required),
6372                         (7, self.fake_scid_rand_bytes, required),
6373                 });
6374
6375                 Ok(())
6376         }
6377 }
6378
6379 /// Arguments for the creation of a ChannelManager that are not deserialized.
6380 ///
6381 /// At a high-level, the process for deserializing a ChannelManager and resuming normal operation
6382 /// is:
6383 /// 1) Deserialize all stored [`ChannelMonitor`]s.
6384 /// 2) Deserialize the [`ChannelManager`] by filling in this struct and calling:
6385 ///    `<(BlockHash, ChannelManager)>::read(reader, args)`
6386 ///    This may result in closing some channels if the [`ChannelMonitor`] is newer than the stored
6387 ///    [`ChannelManager`] state to ensure no loss of funds. Thus, transactions may be broadcasted.
6388 /// 3) If you are not fetching full blocks, register all relevant [`ChannelMonitor`] outpoints the
6389 ///    same way you would handle a [`chain::Filter`] call using
6390 ///    [`ChannelMonitor::get_outputs_to_watch`] and [`ChannelMonitor::get_funding_txo`].
6391 /// 4) Reconnect blocks on your [`ChannelMonitor`]s.
6392 /// 5) Disconnect/connect blocks on the [`ChannelManager`].
6393 /// 6) Re-persist the [`ChannelMonitor`]s to ensure the latest state is on disk.
6394 ///    Note that if you're using a [`ChainMonitor`] for your [`chain::Watch`] implementation, you
6395 ///    will likely accomplish this as a side-effect of calling [`chain::Watch::watch_channel`] in
6396 ///    the next step.
6397 /// 7) Move the [`ChannelMonitor`]s into your local [`chain::Watch`]. If you're using a
6398 ///    [`ChainMonitor`], this is done by calling [`chain::Watch::watch_channel`].
6399 ///
6400 /// Note that the ordering of steps #4-7 is not important, but all four must occur before you
6401 /// call any other methods on the newly-deserialized [`ChannelManager`].
6402 ///
6403 /// Note that because some channels may be closed during deserialization, it is critical that you
6404 /// always deserialize only the latest version of a ChannelManager and ChannelMonitors available to
6405 /// you. If you deserialize an old ChannelManager (during which force-closure transactions may be
6406 /// broadcast), and then later deserialize a newer version of the same ChannelManager (which will
6407 /// not force-close the same channels but consider them live), you may end up revoking a state for
6408 /// which you've already broadcasted the transaction.
6409 ///
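/// A minimal sketch of steps #2 and #7 (illustrative only; `keys_manager`, `fee_estimator`,
/// `chain_monitor`, `tx_broadcaster`, `logger`, `config`, `monitors` (the already-deserialized
/// [`ChannelMonitor`]s) and `manager_bytes` are hypothetical values constructed elsewhere):
///
/// ```ignore
/// // (imports and concrete type parameters omitted)
/// // Step 2: deserialize the ChannelManager, handing it mutable references to every
/// // previously-deserialized ChannelMonitor.
/// let monitor_refs = monitors.iter_mut().collect::<Vec<_>>();
/// let read_args = ChannelManagerReadArgs::new(&keys_manager, &fee_estimator, &chain_monitor,
///     &tx_broadcaster, &logger, config, monitor_refs);
/// let mut reader = &manager_bytes[..];
/// let (_best_block_hash, channel_manager) =
///     <(BlockHash, ChannelManager<_, _, _, _, _, _>)>::read(&mut reader, read_args)?;
///
/// // ... steps #3-6: register outpoints with any chain::Filter, reconnect blocks on the
/// // monitors and the manager, and re-persist the monitors ...
///
/// // Step 7: move each ChannelMonitor into the chain::Watch implementation
/// // (error handling omitted here).
/// for monitor in monitors {
///     chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
/// }
/// ```
///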
6410 /// [`ChainMonitor`]: crate::chain::chainmonitor::ChainMonitor
6411 pub struct ChannelManagerReadArgs<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
6412         where M::Target: chain::Watch<Signer>,
6413         T::Target: BroadcasterInterface,
6414         K::Target: KeysInterface<Signer = Signer>,
6415         F::Target: FeeEstimator,
6416         L::Target: Logger,
6417 {
6418         /// The keys provider which will give us relevant keys. Some keys will be loaded during
6419         /// deserialization and KeysInterface::read_chan_signer will be used to read per-Channel
6420         /// signing data.
6421         pub keys_manager: K,
6422
6423         /// The fee_estimator for use in the ChannelManager in the future.
6424         ///
6425         /// No calls to the FeeEstimator will be made during deserialization.
6426         pub fee_estimator: F,
6427         /// The chain::Watch for use in the ChannelManager in the future.
6428         ///
6429         /// No calls to the chain::Watch will be made during deserialization. It is assumed that
6430         /// you have deserialized ChannelMonitors separately and will add them to your
6431         /// chain::Watch after deserializing this ChannelManager.
6432         pub chain_monitor: M,
6433
6434         /// The BroadcasterInterface which will be used in the ChannelManager in the future and may be
6435         /// used to broadcast the latest local commitment transactions of channels which must be
6436         /// force-closed during deserialization.
6437         pub tx_broadcaster: T,
6438         /// The Logger for use in the ChannelManager and which may be used to log information during
6439         /// deserialization.
6440         pub logger: L,
6441         /// Default settings used for new channels. Any existing channels will continue to use the
6442         /// runtime settings which were stored when the ChannelManager was serialized.
6443         pub default_config: UserConfig,
6444
6445         /// A map from channel funding outpoints to ChannelMonitors for those channels (ie
6446         /// value.get_funding_txo() should be the key).
6447         ///
6448         /// If a monitor is inconsistent with the channel state during deserialization the channel will
6449         /// be force-closed using the data in the ChannelMonitor and the channel will be dropped. This
6450         /// is true for missing channels as well. If there is a monitor missing for which we find
6451         /// channel data, Err(DecodeError::InvalidValue) will be returned.
6452         ///
6453         /// In such cases the latest local transactions will be sent to the tx_broadcaster included in
6454         /// this struct.
6455         ///
6456         /// (C-not exported) because we have no HashMap bindings
6457         pub channel_monitors: HashMap<OutPoint, &'a mut ChannelMonitor<Signer>>,
6458 }
6459
6460 impl<'a, Signer: 'a + Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
6461                 ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>
6462         where M::Target: chain::Watch<Signer>,
6463                 T::Target: BroadcasterInterface,
6464                 K::Target: KeysInterface<Signer = Signer>,
6465                 F::Target: FeeEstimator,
6466                 L::Target: Logger,
6467         {
6468         /// Simple utility function to create a ChannelManagerReadArgs which creates the monitor
6469         /// HashMap for you. This is primarily useful for C bindings where it is not practical to
6470         /// populate a HashMap directly from C.
6471         pub fn new(keys_manager: K, fee_estimator: F, chain_monitor: M, tx_broadcaster: T, logger: L, default_config: UserConfig,
6472                         mut channel_monitors: Vec<&'a mut ChannelMonitor<Signer>>) -> Self {
6473                 Self {
6474                         keys_manager, fee_estimator, chain_monitor, tx_broadcaster, logger, default_config,
6475                         channel_monitors: channel_monitors.drain(..).map(|monitor| { (monitor.get_funding_txo().0, monitor) }).collect()
6476                 }
6477         }
6478 }
6479
6480 // Implement ReadableArgs for an Arc'd ChannelManager to make it a bit easier to work with the
6481 // SipmleArcChannelManager type:
6482 impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
6483         ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (BlockHash, Arc<ChannelManager<Signer, M, T, K, F, L>>)
6484         where M::Target: chain::Watch<Signer>,
6485         T::Target: BroadcasterInterface,
6486         K::Target: KeysInterface<Signer = Signer>,
6487         F::Target: FeeEstimator,
6488         L::Target: Logger,
6489 {
6490         fn read<R: io::Read>(reader: &mut R, args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
6491                 let (blockhash, chan_manager) = <(BlockHash, ChannelManager<Signer, M, T, K, F, L>)>::read(reader, args)?;
6492                 Ok((blockhash, Arc::new(chan_manager)))
6493         }
6494 }
6495
6496 impl<'a, Signer: Sign, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
6497         ReadableArgs<ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>> for (BlockHash, ChannelManager<Signer, M, T, K, F, L>)
6498         where M::Target: chain::Watch<Signer>,
6499         T::Target: BroadcasterInterface,
6500         K::Target: KeysInterface<Signer = Signer>,
6501         F::Target: FeeEstimator,
6502         L::Target: Logger,
6503 {
6504         fn read<R: io::Read>(reader: &mut R, mut args: ChannelManagerReadArgs<'a, Signer, M, T, K, F, L>) -> Result<Self, DecodeError> {
6505                 let _ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
6506
6507                 let genesis_hash: BlockHash = Readable::read(reader)?;
6508                 let best_block_height: u32 = Readable::read(reader)?;
6509                 let best_block_hash: BlockHash = Readable::read(reader)?;
6510
6511                 let mut failed_htlcs = Vec::new();
6512
6513                 let channel_count: u64 = Readable::read(reader)?;
6514                 let mut funding_txo_set = HashSet::with_capacity(cmp::min(channel_count as usize, 128));
6515                 let mut by_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
6516                 let mut short_to_id = HashMap::with_capacity(cmp::min(channel_count as usize, 128));
6517                 let mut channel_closures = Vec::new();
6518                 for _ in 0..channel_count {
6519                         let mut channel: Channel<Signer> = Channel::read(reader, (&args.keys_manager, best_block_height))?;
6520                         let funding_txo = channel.get_funding_txo().ok_or(DecodeError::InvalidValue)?;
6521                         funding_txo_set.insert(funding_txo.clone());
6522                         if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) {
6523                                 if channel.get_cur_holder_commitment_transaction_number() < monitor.get_cur_holder_commitment_number() ||
6524                                                 channel.get_revoked_counterparty_commitment_transaction_number() < monitor.get_min_seen_secret() ||
6525                                                 channel.get_cur_counterparty_commitment_transaction_number() < monitor.get_cur_counterparty_commitment_number() ||
6526                                                 channel.get_latest_monitor_update_id() > monitor.get_latest_update_id() {
6527                                         // If the channel is ahead of the monitor, return InvalidValue:
6528                                         log_error!(args.logger, "A ChannelMonitor is stale compared to the current ChannelManager! This indicates a potentially-critical violation of the chain::Watch API!");
6529                                         log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
6530                                                 log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
6531                                         log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
6532                                         log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
6533                                         log_error!(args.logger, " Without the latest ChannelMonitor we cannot continue without risking funds.");
6534                                         log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
6535                                         return Err(DecodeError::InvalidValue);
6536                                 } else if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() ||
6537                                                 channel.get_revoked_counterparty_commitment_transaction_number() > monitor.get_min_seen_secret() ||
6538                                                 channel.get_cur_counterparty_commitment_transaction_number() > monitor.get_cur_counterparty_commitment_number() ||
6539                                                 channel.get_latest_monitor_update_id() < monitor.get_latest_update_id() {
6540                                         // But if the channel is behind the monitor, close the channel:
6541                                         log_error!(args.logger, "A ChannelManager is stale compared to the current ChannelMonitor!");
6542                                         log_error!(args.logger, " The channel will be force-closed and the latest commitment transaction from the ChannelMonitor broadcast.");
6543                                         log_error!(args.logger, " The ChannelMonitor for channel {} is at update_id {} but the ChannelManager is at update_id {}.",
6544                                                 log_bytes!(channel.channel_id()), monitor.get_latest_update_id(), channel.get_latest_monitor_update_id());
6545                                         let (_, mut new_failed_htlcs) = channel.force_shutdown(true);
6546                                         failed_htlcs.append(&mut new_failed_htlcs);
6547                                         monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
6548                                         channel_closures.push(events::Event::ChannelClosed {
6549                                                 channel_id: channel.channel_id(),
6550                                                 user_channel_id: channel.get_user_id(),
6551                                                 reason: ClosureReason::OutdatedChannelManager
6552                                         });
6553                                 } else {
6554                                         log_info!(args.logger, "Successfully loaded channel {}", log_bytes!(channel.channel_id()));
6555                                         if let Some(short_channel_id) = channel.get_short_channel_id() {
6556                                                 short_to_id.insert(short_channel_id, channel.channel_id());
6557                                         }
6558                                         by_id.insert(channel.channel_id(), channel);
6559                                 }
6560                         } else {
6561                                 log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.channel_id()));
6562                                 log_error!(args.logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,");
6563                                 log_error!(args.logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!");
6564                                 log_error!(args.logger, " Without the ChannelMonitor we cannot continue without risking funds.");
6565                                 log_error!(args.logger, " Please ensure the chain::Watch API requirements are met and file a bug report at https://github.com/lightningdevkit/rust-lightning");
6566                                 return Err(DecodeError::InvalidValue);
6567                         }
6568                 }
6569
6570                 for (ref funding_txo, ref mut monitor) in args.channel_monitors.iter_mut() {
6571                         if !funding_txo_set.contains(funding_txo) {
6572                                 log_info!(args.logger, "Broadcasting latest holder commitment transaction for closed channel {}", log_bytes!(funding_txo.to_channel_id()));
6573                                 monitor.broadcast_latest_holder_commitment_txn(&args.tx_broadcaster, &args.logger);
6574                         }
6575                 }
6576
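                // The counts read below come from untrusted serialized data, so cap any up-front
                // allocation at MAX_ALLOC_SIZE bytes rather than trusting a claimed length.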
6577                 const MAX_ALLOC_SIZE: usize = 1024 * 64;
6578                 let forward_htlcs_count: u64 = Readable::read(reader)?;
6579                 let mut forward_htlcs = HashMap::with_capacity(cmp::min(forward_htlcs_count as usize, 128));
6580                 for _ in 0..forward_htlcs_count {
6581                         let short_channel_id = Readable::read(reader)?;
6582                         let pending_forwards_count: u64 = Readable::read(reader)?;
6583                         let mut pending_forwards = Vec::with_capacity(cmp::min(pending_forwards_count as usize, MAX_ALLOC_SIZE/mem::size_of::<HTLCForwardInfo>()));
6584                         for _ in 0..pending_forwards_count {
6585                                 pending_forwards.push(Readable::read(reader)?);
6586                         }
6587                         forward_htlcs.insert(short_channel_id, pending_forwards);
6588                 }
6589
6590                 let claimable_htlcs_count: u64 = Readable::read(reader)?;
6591                 let mut claimable_htlcs = HashMap::with_capacity(cmp::min(claimable_htlcs_count as usize, 128));
6592                 for _ in 0..claimable_htlcs_count {
6593                         let payment_hash = Readable::read(reader)?;
6594                         let previous_hops_len: u64 = Readable::read(reader)?;
6595                         let mut previous_hops = Vec::with_capacity(cmp::min(previous_hops_len as usize, MAX_ALLOC_SIZE/mem::size_of::<ClaimableHTLC>()));
6596                         for _ in 0..previous_hops_len {
6597                                 previous_hops.push(Readable::read(reader)?);
6598                         }
6599                         claimable_htlcs.insert(payment_hash, previous_hops);
6600                 }
6601
6602                 let peer_count: u64 = Readable::read(reader)?;
6603                 let mut per_peer_state = HashMap::with_capacity(cmp::min(peer_count as usize, MAX_ALLOC_SIZE/mem::size_of::<(PublicKey, Mutex<PeerState>)>()));
6604                 for _ in 0..peer_count {
6605                         let peer_pubkey = Readable::read(reader)?;
6606                         let peer_state = PeerState {
6607                                 latest_features: Readable::read(reader)?,
6608                         };
6609                         per_peer_state.insert(peer_pubkey, Mutex::new(peer_state));
6610                 }
6611
6612                 let event_count: u64 = Readable::read(reader)?;
6613                 let mut pending_events_read: Vec<events::Event> = Vec::with_capacity(cmp::min(event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<events::Event>()));
6614                 for _ in 0..event_count {
6615                         match MaybeReadable::read(reader)? {
6616                                 Some(event) => pending_events_read.push(event),
6617                                 None => continue,
6618                         }
6619                 }
6620                 if forward_htlcs_count > 0 {
6621                         // If we have pending HTLCs to forward, assume we either dropped a
6622                         // `PendingHTLCsForwardable` or the user received it but never processed it as they
6623                         // shut down before the timer hit. Either way, set the time_forwardable to a small
6624                         // constant as enough time has likely passed that we should simply handle the forwards
6625                         // now, or at least after the user gets a chance to reconnect to our peers.
6626                         pending_events_read.push(events::Event::PendingHTLCsForwardable {
6627                                 time_forwardable: Duration::from_secs(2),
6628                         });
6629                 }
6630
6631                 let background_event_count: u64 = Readable::read(reader)?;
6632                 let mut pending_background_events_read: Vec<BackgroundEvent> = Vec::with_capacity(cmp::min(background_event_count as usize, MAX_ALLOC_SIZE/mem::size_of::<BackgroundEvent>()));
6633                 for _ in 0..background_event_count {
6634                         match <u8 as Readable>::read(reader)? {
6635                                 0 => pending_background_events_read.push(BackgroundEvent::ClosingMonitorUpdate((Readable::read(reader)?, Readable::read(reader)?))),
6636                                 _ => return Err(DecodeError::InvalidValue),
6637                         }
6638                 }
6639
6640                 let last_node_announcement_serial: u32 = Readable::read(reader)?;
6641                 let highest_seen_timestamp: u32 = Readable::read(reader)?;
6642
6643                 let pending_inbound_payment_count: u64 = Readable::read(reader)?;
6644                 let mut pending_inbound_payments: HashMap<PaymentHash, PendingInboundPayment> = HashMap::with_capacity(cmp::min(pending_inbound_payment_count as usize, MAX_ALLOC_SIZE/(3*32)));
6645                 for _ in 0..pending_inbound_payment_count {
6646                         if pending_inbound_payments.insert(Readable::read(reader)?, Readable::read(reader)?).is_some() {
6647                                 return Err(DecodeError::InvalidValue);
6648                         }
6649                 }
6650
6651                 let pending_outbound_payments_count_compat: u64 = Readable::read(reader)?;
6652                 let mut pending_outbound_payments_compat: HashMap<PaymentId, PendingOutboundPayment> =
6653                         HashMap::with_capacity(cmp::min(pending_outbound_payments_count_compat as usize, MAX_ALLOC_SIZE/32));
6654                 for _ in 0..pending_outbound_payments_count_compat {
6655                         let session_priv = Readable::read(reader)?;
6656                         let payment = PendingOutboundPayment::Legacy {
6657                                 session_privs: [session_priv].iter().cloned().collect()
6658                         };
6659                         if pending_outbound_payments_compat.insert(PaymentId(session_priv), payment).is_some() {
6660                                 return Err(DecodeError::InvalidValue)
6661                         };
6662                 }
6663
6664                 // pending_outbound_payments_no_retry is for compatibility with 0.0.101 clients.
6665                 let mut pending_outbound_payments_no_retry: Option<HashMap<PaymentId, HashSet<[u8; 32]>>> = None;
6666                 let mut pending_outbound_payments = None;
6667                 let mut received_network_pubkey: Option<PublicKey> = None;
6668                 let mut fake_scid_rand_bytes: Option<[u8; 32]> = None;
6669                 read_tlv_fields!(reader, {
6670                         (1, pending_outbound_payments_no_retry, option),
6671                         (3, pending_outbound_payments, option),
6672                         (5, received_network_pubkey, option),
6673                         (7, fake_scid_rand_bytes, option),
6674                 });
6675                 if fake_scid_rand_bytes.is_none() {
6676                         fake_scid_rand_bytes = Some(args.keys_manager.get_secure_random_bytes());
6677                 }
6678
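                // Select the outbound payments map based on which version serialized this
                // ChannelManager: pre-0.0.101 wrote neither of these TLVs (so use the compat
                // entries read above), 0.0.101 wrote only the no-retry map, and 0.0.102+ wrote
                // the full retry-capable map.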
6679                 if pending_outbound_payments.is_none() && pending_outbound_payments_no_retry.is_none() {
6680                         pending_outbound_payments = Some(pending_outbound_payments_compat);
6681                 } else if pending_outbound_payments.is_none() {
6682                         let mut outbounds = HashMap::new();
6683                         for (id, session_privs) in pending_outbound_payments_no_retry.unwrap().drain() {
6684                                 outbounds.insert(id, PendingOutboundPayment::Legacy { session_privs });
6685                         }
6686                         pending_outbound_payments = Some(outbounds);
6687                 } else {
6688                         // If we're tracking pending payments, ensure we haven't lost any by looking at the
6689                         // ChannelMonitor data for any channels for which we do not have authoritative state
6690                         // (i.e. those for which we just force-closed above or we otherwise don't have a
6691                         // corresponding `Channel` at all).
6692                         // This avoids several edge-cases where we would otherwise "forget" about pending
6693                         // payments which are still in-flight via their on-chain state.
6694                         // We only rebuild the pending payments map if we were most recently serialized by
6695                         // 0.0.102+
6696                         for (_, monitor) in args.channel_monitors {
6697                                 if by_id.get(&monitor.get_funding_txo().0.to_channel_id()).is_none() {
6698                                         for (htlc_source, htlc) in monitor.get_pending_outbound_htlcs() {
6699                                                 if let HTLCSource::OutboundRoute { payment_id, session_priv, path, payment_secret, .. } = htlc_source {
6700                                                         if path.is_empty() {
6701                                                                 log_error!(args.logger, "Got an empty path for a pending payment");
6702                                                                 return Err(DecodeError::InvalidValue);
6703                                                         }
6704                                                         let path_amt = path.last().unwrap().fee_msat;
6705                                                         let mut session_priv_bytes = [0; 32];
6706                                                         session_priv_bytes[..].copy_from_slice(&session_priv[..]);
6707                                                         match pending_outbound_payments.as_mut().unwrap().entry(payment_id) {
6708                                                                 hash_map::Entry::Occupied(mut entry) => {
6709                                                                         let newly_added = entry.get_mut().insert(session_priv_bytes, &path);
6710                                                                         log_info!(args.logger, "{} a pending payment path for {} msat for session priv {} on an existing pending payment with payment hash {}",
6711                                                                                 if newly_added { "Added" } else { "Had" }, path_amt, log_bytes!(session_priv_bytes), log_bytes!(htlc.payment_hash.0));
6712                                                                 },
6713                                                                 hash_map::Entry::Vacant(entry) => {
6714                                                                         let path_fee = path.get_path_fees();
6715                                                                         entry.insert(PendingOutboundPayment::Retryable {
6716                                                                                 session_privs: [session_priv_bytes].iter().map(|a| *a).collect(),
6717                                                                                 payment_hash: htlc.payment_hash,
6718                                                                                 payment_secret,
6719                                                                                 pending_amt_msat: path_amt,
6720                                                                                 pending_fee_msat: Some(path_fee),
6721                                                                                 total_msat: path_amt,
6722                                                                                 starting_block_height: best_block_height,
6723                                                                         });
6724                                                                         log_info!(args.logger, "Added a pending payment for {} msat with payment hash {} for path with session priv {}",
6725                                                                                 path_amt, log_bytes!(htlc.payment_hash.0),  log_bytes!(session_priv_bytes));
6726                                                                 }
6727                                                         }
6728                                                 }
6729                                         }
6730                                 }
6731                         }
6732                 }
6733
6734                 let mut secp_ctx = Secp256k1::new();
6735                 secp_ctx.seeded_randomize(&args.keys_manager.get_secure_random_bytes());
6736
6737                 if !channel_closures.is_empty() {
6738                         pending_events_read.append(&mut channel_closures);
6739                 }
6740
6741                 let our_network_key = match args.keys_manager.get_node_secret(Recipient::Node) {
6742                         Ok(key) => key,
6743                         Err(()) => return Err(DecodeError::InvalidValue)
6744                 };
6745                 let our_network_pubkey = PublicKey::from_secret_key(&secp_ctx, &our_network_key);
6746                 if let Some(network_pubkey) = received_network_pubkey {
6747                         if network_pubkey != our_network_pubkey {
6748                                 log_error!(args.logger, "Key that was generated does not match the existing key.");
6749                                 return Err(DecodeError::InvalidValue);
6750                         }
6751                 }
6752
6753                 let mut outbound_scid_aliases = HashSet::new();
6754                 for (chan_id, chan) in by_id.iter_mut() {
6755                         if chan.outbound_scid_alias() == 0 {
6756                                 let mut outbound_scid_alias;
6757                                 loop {
6758                                         outbound_scid_alias = fake_scid::Namespace::OutboundAlias
6759                                                 .get_fake_scid(best_block_height, &genesis_hash, fake_scid_rand_bytes.as_ref().unwrap(), &args.keys_manager);
6760                                         if outbound_scid_aliases.insert(outbound_scid_alias) { break; }
6761                                 }
6762                                 chan.set_outbound_scid_alias(outbound_scid_alias);
6763                         } else if !outbound_scid_aliases.insert(chan.outbound_scid_alias()) {
6764                                 // Note that in rare cases it's possible to hit this while reading an older
6765                                 // channel if we just happened to pick a colliding outbound alias above.
6766                                 log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
6767                                 return Err(DecodeError::InvalidValue);
6768                         }
6769                         if chan.is_usable() {
6770                                 if short_to_id.insert(chan.outbound_scid_alias(), *chan_id).is_some() {
6771                                         // Note that in rare cases it's possible to hit this while reading an older
6772                                         // channel if we just happened to pick a colliding outbound alias above.
6773                                         log_error!(args.logger, "Got duplicate outbound SCID alias; {}", chan.outbound_scid_alias());
6774                                         return Err(DecodeError::InvalidValue);
6775                                 }
6776                         }
6777                 }
6778
6779                 let inbound_pmt_key_material = args.keys_manager.get_inbound_payment_key_material();
6780                 let expanded_inbound_key = inbound_payment::ExpandedKey::new(&inbound_pmt_key_material);
6781                 let channel_manager = ChannelManager {
6782                         genesis_hash,
6783                         fee_estimator: args.fee_estimator,
6784                         chain_monitor: args.chain_monitor,
6785                         tx_broadcaster: args.tx_broadcaster,
6786
6787                         best_block: RwLock::new(BestBlock::new(best_block_hash, best_block_height)),
6788
6789                         channel_state: Mutex::new(ChannelHolder {
6790                                 by_id,
6791                                 short_to_id,
6792                                 forward_htlcs,
6793                                 claimable_htlcs,
6794                                 pending_msg_events: Vec::new(),
6795                         }),
6796                         inbound_payment_key: expanded_inbound_key,
6797                         pending_inbound_payments: Mutex::new(pending_inbound_payments),
6798                         pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
6799
6800                         outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
6801                         fake_scid_rand_bytes: fake_scid_rand_bytes.unwrap(),
6802
6803                         our_network_key,
6804                         our_network_pubkey,
6805                         secp_ctx,
6806
6807                         last_node_announcement_serial: AtomicUsize::new(last_node_announcement_serial as usize),
6808                         highest_seen_timestamp: AtomicUsize::new(highest_seen_timestamp as usize),
6809
6810                         per_peer_state: RwLock::new(per_peer_state),
6811
6812                         pending_events: Mutex::new(pending_events_read),
6813                         pending_background_events: Mutex::new(pending_background_events_read),
6814                         total_consistency_lock: RwLock::new(()),
6815                         persistence_notifier: PersistenceNotifier::new(),
6816
6817                         keys_manager: args.keys_manager,
6818                         logger: args.logger,
6819                         default_configuration: args.default_config,
6820                 };
6821
6822                 for htlc_source in failed_htlcs.drain(..) {
6823                         channel_manager.fail_htlc_backwards_internal(channel_manager.channel_state.lock().unwrap(), htlc_source.0, &htlc_source.1, HTLCFailReason::Reason { failure_code: 0x4000 | 8, data: Vec::new() });
6824                 }
6825
6826                 //TODO: Broadcast channel update for closed channels, but only after we've made a
6827                 //connection or two.
6828
6829                 Ok((best_block_hash.clone(), channel_manager))
6830         }
6831 }
6832
6833 #[cfg(test)]
6834 mod tests {
6835         use bitcoin::hashes::Hash;
6836         use bitcoin::hashes::sha256::Hash as Sha256;
6837         use core::time::Duration;
6838         use core::sync::atomic::Ordering;
6839         use ln::{PaymentPreimage, PaymentHash, PaymentSecret};
6840         use ln::channelmanager::{PaymentId, PaymentSendFailure};
6841         use ln::channelmanager::inbound_payment;
6842         use ln::features::InitFeatures;
6843         use ln::functional_test_utils::*;
6844         use ln::msgs;
6845         use ln::msgs::ChannelMessageHandler;
6846         use routing::router::{PaymentParameters, RouteParameters, find_route};
6847         use util::errors::APIError;
6848         use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
6849         use util::test_utils;
6850         use chain::keysinterface::KeysInterface;
6851
6852         #[cfg(feature = "std")]
6853         #[test]
6854         fn test_wait_timeout() {
6855                 use ln::channelmanager::PersistenceNotifier;
6856                 use sync::Arc;
6857                 use core::sync::atomic::AtomicBool;
6858                 use std::thread;
6859
6860                 let persistence_notifier = Arc::new(PersistenceNotifier::new());
6861                 let thread_notifier = Arc::clone(&persistence_notifier);
6862
6863                 let exit_thread = Arc::new(AtomicBool::new(false));
6864                 let exit_thread_clone = exit_thread.clone();
6865                 thread::spawn(move || {
6866                         loop {
6867                                 let &(ref persist_mtx, ref cnd) = &thread_notifier.persistence_lock;
6868                                 let mut persistence_lock = persist_mtx.lock().unwrap();
6869                                 *persistence_lock = true;
6870                                 cnd.notify_all();
6871
6872                                 if exit_thread_clone.load(Ordering::SeqCst) {
6873                                         break
6874                                 }
6875                         }
6876                 });
6877
6878                 // Check that we can block indefinitely until updates are available.
6879                 let _ = persistence_notifier.wait();
6880
6881                 // Check that the PersistenceNotifier will return after the given duration if updates are
6882                 // available.
6883                 loop {
6884                         if persistence_notifier.wait_timeout(Duration::from_millis(100)) {
6885                                 break
6886                         }
6887                 }
6888
6889                 exit_thread.store(true, Ordering::SeqCst);
6890
6891                 // Check that the PersistenceNotifier will return after the given duration even if no updates
6892                 // are available.
6893                 loop {
6894                         if !persistence_notifier.wait_timeout(Duration::from_millis(100)) {
6895                                 break
6896                         }
6897                 }
6898         }
6899
6900         #[test]
6901         fn test_notify_limits() {
6902                 // Check that a few cases which don't require the persistence of a new ChannelManager,
6903                 // indeed, do not cause the persistence of a new ChannelManager.
6904                 let chanmon_cfgs = create_chanmon_cfgs(3);
6905                 let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
6906                 let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
6907                 let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
6908
6909                 // All nodes start with a persistable update pending as `create_network` connects each node
6910                 // with all other nodes to make most tests simpler.
6911                 assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
6912                 assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
6913                 assert!(nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
6914
6915                 let mut chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
6916
6917                 // We check that the channel info the nodes have doesn't change too early, even though we
6918                 // try to deliver messages carrying new values
6919                 chan.0.contents.fee_base_msat *= 2;
6920                 chan.1.contents.fee_base_msat *= 2;
6921                 let node_a_chan_info = nodes[0].node.list_channels()[0].clone();
6922                 let node_b_chan_info = nodes[1].node.list_channels()[0].clone();
6923
6924                 // The first two nodes (which opened a channel) should now require fresh persistence
6925                 assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
6926                 assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
6927                 // ... but the last node should not.
6928                 assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
6929                 // After persisting the first two nodes they should no longer need fresh persistence.
6930                 assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
6931                 assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
6932
6933                 // Node 3, unrelated to the only channel, shouldn't care if it receives a channel_update
6934                 // about the channel.
6935                 nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.0);
6936                 nodes[2].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &chan.1);
6937                 assert!(!nodes[2].node.await_persistable_update_timeout(Duration::from_millis(1)));
6938
6939                 // The nodes which are a party to the channel should also ignore messages from unrelated
6940                 // parties.
6941                 nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
6942                 nodes[0].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
6943                 nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.0);
6944                 nodes[1].node.handle_channel_update(&nodes[2].node.get_our_node_id(), &chan.1);
6945                 assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
6946                 assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
6947
6948                 // At this point the channel info given by peers should still be the same.
6949                 assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
6950                 assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
6951
6952                 // An earlier version of handle_channel_update didn't check the directionality of the
6953                 // update message and would always update the local fee info, even if our peer was
6954                 // (spuriously) forwarding us our own channel_update.
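                     // The low bit of `channel_update.contents.flags` encodes the direction: 0 if the update was
                     // originated by "node one", i.e. the peer whose serialized pubkey compares lower.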
6955                 let as_node_one = nodes[0].node.get_our_node_id().serialize()[..] < nodes[1].node.get_our_node_id().serialize()[..];
6956                 let as_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.0 } else { &chan.1 };
6957                 let bs_update = if as_node_one == (chan.0.contents.flags & 1 == 0 /* chan.0 is from node one */) { &chan.1 } else { &chan.0 };
6958
6959                 // First deliver each peer's own message, checking that the node doesn't need to be
6960                 // persisted and that its channel info remains the same.
6961                 nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &as_update);
6962                 nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &bs_update);
6963                 assert!(!nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
6964                 assert!(!nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
6965                 assert_eq!(nodes[0].node.list_channels()[0], node_a_chan_info);
6966                 assert_eq!(nodes[1].node.list_channels()[0], node_b_chan_info);
6967
6968                 // Finally, deliver the other peer's message, ensuring each node needs to be persisted and
6969                 // that its channel info has updated.
6970                 nodes[0].node.handle_channel_update(&nodes[1].node.get_our_node_id(), &bs_update);
6971                 nodes[1].node.handle_channel_update(&nodes[0].node.get_our_node_id(), &as_update);
6972                 assert!(nodes[0].node.await_persistable_update_timeout(Duration::from_millis(1)));
6973                 assert!(nodes[1].node.await_persistable_update_timeout(Duration::from_millis(1)));
6974                 assert_ne!(nodes[0].node.list_channels()[0], node_a_chan_info);
6975                 assert_ne!(nodes[1].node.list_channels()[0], node_b_chan_info);
6976         }
6977
6978         #[test]
6979         fn test_keysend_dup_hash_partial_mpp() {
6980                 // Test that a keysend payment with a duplicate hash to an existing partial MPP payment fails as
6981                 // expected.
6982                 let chanmon_cfgs = create_chanmon_cfgs(2);
6983                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
6984                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
6985                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
6986                 create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
6987
6988                 // First, send a partial MPP payment.
6989                 let (route, our_payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100_000);
6990                 let payment_id = PaymentId([42; 32]);
6991                 // Use the utility function send_payment_along_path to send the payment with MPP data which
6992                 // indicates there are more HTLCs coming.
6993                 let cur_height = CHAN_CONFIRM_DEPTH + 1; // route_payment calls send_payment, which adds 1 to the current height. So we do the same here to match.
6994                 nodes[0].node.send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
6995                 check_added_monitors!(nodes[0], 1);
6996                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
6997                 assert_eq!(events.len(), 1);
6998                 pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), false, None);
6999
7000                 // Next, send a keysend payment with the same payment_hash and make sure it fails.
7001                 nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
7002                 check_added_monitors!(nodes[0], 1);
7003                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7004                 assert_eq!(events.len(), 1);
7005                 let ev = events.drain(..).next().unwrap();
7006                 let payment_event = SendEvent::from_event(ev);
7007                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7008                 check_added_monitors!(nodes[1], 0);
7009                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7010                 expect_pending_htlcs_forwardable!(nodes[1]);
7011                 expect_pending_htlcs_forwardable!(nodes[1]);
7012                 check_added_monitors!(nodes[1], 1);
7013                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7014                 assert!(updates.update_add_htlcs.is_empty());
7015                 assert!(updates.update_fulfill_htlcs.is_empty());
7016                 assert_eq!(updates.update_fail_htlcs.len(), 1);
7017                 assert!(updates.update_fail_malformed_htlcs.is_empty());
7018                 assert!(updates.update_fee.is_none());
7019                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
7020                 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
7021                 expect_payment_failed!(nodes[0], our_payment_hash, true);
7022
7023                 // Send the second half of the original MPP payment.
7024                 nodes[0].node.send_payment_along_path(&route.paths[0], &route.payment_params, &our_payment_hash, &Some(payment_secret), 200_000, cur_height, payment_id, &None).unwrap();
7025                 check_added_monitors!(nodes[0], 1);
7026                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7027                 assert_eq!(events.len(), 1);
7028                 pass_along_path(&nodes[0], &[&nodes[1]], 200_000, our_payment_hash, Some(payment_secret), events.drain(..).next().unwrap(), true, None);
7029
7030                 // Claim the full MPP payment. Note that we can't use a test utility like
7031                 // claim_funds_along_route because the ordering of the messages causes the second half of the
7032                 // payment to be put in the holding cell, which confuses the test utilities. So we exchange the
7033                 // lightning messages manually.
7034                 assert!(nodes[1].node.claim_funds(payment_preimage));
7035                 check_added_monitors!(nodes[1], 2);
7036                 let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7037                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
7038                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
7039                 check_added_monitors!(nodes[0], 1);
7040                 let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
7041                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa);
7042                 check_added_monitors!(nodes[1], 1);
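                     // Handling the RAA frees node B's holding cell, releasing the fulfill for the second HTLC
                     // part as a second update batch.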
7043                 let bs_second_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7044                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_first_cs);
7045                 check_added_monitors!(nodes[1], 1);
7046                 let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
7047                 nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
7048                 nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
7049                 check_added_monitors!(nodes[0], 1);
7050                 let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
7051                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
7052                 let as_second_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
7053                 check_added_monitors!(nodes[0], 1);
7054                 nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa);
7055                 check_added_monitors!(nodes[1], 1);
7056                 nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_updates.commitment_signed);
7057                 check_added_monitors!(nodes[1], 1);
7058                 let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
7059                 nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa);
7060                 check_added_monitors!(nodes[0], 1);
7061
7062                 // Note that successful MPP payments will generate a single PaymentSent event upon the first
7063                 // path's success and a PaymentPathSuccessful event for each path's success.
7064                 let events = nodes[0].node.get_and_clear_pending_events();
7065                 assert_eq!(events.len(), 3);
7066                 match events[0] {
7067                         Event::PaymentSent { payment_id: ref id, payment_preimage: ref preimage, payment_hash: ref hash, .. } => {
7068                                 assert_eq!(Some(payment_id), *id);
7069                                 assert_eq!(payment_preimage, *preimage);
7070                                 assert_eq!(our_payment_hash, *hash);
7071                         },
7072                         _ => panic!("Unexpected event"),
7073                 }
7074                 match events[1] {
7075                         Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
7076                                 assert_eq!(payment_id, *actual_payment_id);
7077                                 assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
7078                                 assert_eq!(route.paths[0], *path);
7079                         },
7080                         _ => panic!("Unexpected event"),
7081                 }
7082                 match events[2] {
7083                         Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
7084                                 assert_eq!(payment_id, *actual_payment_id);
7085                                 assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
7086                                 assert_eq!(route.paths[0], *path);
7087                         },
7088                         _ => panic!("Unexpected event"),
7089                 }
7090         }
7091
7092         #[test]
7093         fn test_keysend_dup_payment_hash() {
7094                 // (1): Test that a keysend payment with a duplicate payment hash to an existing pending
7095                 //      outbound regular payment fails as expected.
7096                 // (2): Test that a regular payment with a duplicate payment hash to an existing keysend payment
7097                 //      fails as expected.
7098                 let chanmon_cfgs = create_chanmon_cfgs(2);
7099                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7100                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7101                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7102                 create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
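                     // The keysend routes below are built manually via `find_route`, which needs a scorer and
                     // random seed bytes.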
7103                 let scorer = test_utils::TestScorer::with_penalty(0);
7104                 let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7105
7106                 // To start (1), send a regular payment but don't claim it.
7107                 let expected_route = [&nodes[1]];
7108                 let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &expected_route, 100_000);
7109
7110                 // Next, attempt a keysend payment and make sure it fails.
7111                 let route_params = RouteParameters {
7112                         payment_params: PaymentParameters::for_keysend(expected_route.last().unwrap().node.get_our_node_id()),
7113                         final_value_msat: 100_000,
7114                         final_cltv_expiry_delta: TEST_FINAL_CLTV,
7115                 };
7116                 let route = find_route(
7117                         &nodes[0].node.get_our_node_id(), &route_params, nodes[0].network_graph, None,
7118                         nodes[0].logger, &scorer, &random_seed_bytes
7119                 ).unwrap();
7120                 nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
7121                 check_added_monitors!(nodes[0], 1);
7122                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7123                 assert_eq!(events.len(), 1);
7124                 let ev = events.drain(..).next().unwrap();
7125                 let payment_event = SendEvent::from_event(ev);
7126                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7127                 check_added_monitors!(nodes[1], 0);
7128                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7129                 expect_pending_htlcs_forwardable!(nodes[1]);
7130                 expect_pending_htlcs_forwardable!(nodes[1]);
7131                 check_added_monitors!(nodes[1], 1);
7132                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7133                 assert!(updates.update_add_htlcs.is_empty());
7134                 assert!(updates.update_fulfill_htlcs.is_empty());
7135                 assert_eq!(updates.update_fail_htlcs.len(), 1);
7136                 assert!(updates.update_fail_malformed_htlcs.is_empty());
7137                 assert!(updates.update_fee.is_none());
7138                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
7139                 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
7140                 expect_payment_failed!(nodes[0], payment_hash, true);
7141
7142                 // Finally, claim the original payment.
7143                 claim_payment(&nodes[0], &expected_route, payment_preimage);
7144
7145                 // To start (2), send a keysend payment but don't claim it.
7146                 let payment_preimage = PaymentPreimage([42; 32]);
7147                 let route = find_route(
7148                         &nodes[0].node.get_our_node_id(), &route_params, nodes[0].network_graph, None,
7149                         nodes[0].logger, &scorer, &random_seed_bytes
7150                 ).unwrap();
7151                 let (payment_hash, _) = nodes[0].node.send_spontaneous_payment(&route, Some(payment_preimage)).unwrap();
7152                 check_added_monitors!(nodes[0], 1);
7153                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7154                 assert_eq!(events.len(), 1);
7155                 let event = events.pop().unwrap();
7156                 let path = vec![&nodes[1]];
7157                 pass_along_path(&nodes[0], &path, 100_000, payment_hash, None, event, true, Some(payment_preimage));
7158
7159                 // Next, attempt a regular payment and make sure it fails.
7160                 let payment_secret = PaymentSecret([43; 32]);
7161                 nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
7162                 check_added_monitors!(nodes[0], 1);
7163                 let mut events = nodes[0].node.get_and_clear_pending_msg_events();
7164                 assert_eq!(events.len(), 1);
7165                 let ev = events.drain(..).next().unwrap();
7166                 let payment_event = SendEvent::from_event(ev);
7167                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
7168                 check_added_monitors!(nodes[1], 0);
7169                 commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false);
7170                 expect_pending_htlcs_forwardable!(nodes[1]);
7171                 expect_pending_htlcs_forwardable!(nodes[1]);
7172                 check_added_monitors!(nodes[1], 1);
7173                 let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
7174                 assert!(updates.update_add_htlcs.is_empty());
7175                 assert!(updates.update_fulfill_htlcs.is_empty());
7176                 assert_eq!(updates.update_fail_htlcs.len(), 1);
7177                 assert!(updates.update_fail_malformed_htlcs.is_empty());
7178                 assert!(updates.update_fee.is_none());
7179                 nodes[0].node.handle_update_fail_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fail_htlcs[0]);
7180                 commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, true, true);
7181                 expect_payment_failed!(nodes[0], payment_hash, true);
7182
7183                 // Finally, succeed the keysend payment.
7184                 claim_payment(&nodes[0], &expected_route, payment_preimage);
7185         }
7186
7187         #[test]
7188         fn test_keysend_hash_mismatch() {
7189                 // Test that if we receive a keysend `update_add_htlc` msg, we fail as expected if the keysend
7190                 // preimage doesn't match the msg's payment hash.
7191                 let chanmon_cfgs = create_chanmon_cfgs(2);
7192                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7193                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7194                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7195
7196                 let payer_pubkey = nodes[0].node.get_our_node_id();
7197                 let payee_pubkey = nodes[1].node.get_our_node_id();
7198                 nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
7199                 nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
7200
7201                 let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
7202                 let route_params = RouteParameters {
7203                         payment_params: PaymentParameters::for_keysend(payee_pubkey),
7204                         final_value_msat: 10000,
7205                         final_cltv_expiry_delta: 40,
7206                 };
7207                 let network_graph = nodes[0].network_graph;
7208                 let first_hops = nodes[0].node.list_usable_channels();
7209                 let scorer = test_utils::TestScorer::with_penalty(0);
7210                 let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7211                 let route = find_route(
7212                         &payer_pubkey, &route_params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
7213                         nodes[0].logger, &scorer, &random_seed_bytes
7214                 ).unwrap();
7215
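                     // Use the internal send helper so we can supply a payment hash that doesn't match the
                     // keysend preimage; the public keysend API derives the hash from the preimage itself.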
7216                 let test_preimage = PaymentPreimage([42; 32]);
7217                 let mismatch_payment_hash = PaymentHash([43; 32]);
7218                 let _ = nodes[0].node.send_payment_internal(&route, mismatch_payment_hash, &None, Some(test_preimage), None, None).unwrap();
7219                 check_added_monitors!(nodes[0], 1);
7220
7221                 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
7222                 assert_eq!(updates.update_add_htlcs.len(), 1);
7223                 assert!(updates.update_fulfill_htlcs.is_empty());
7224                 assert!(updates.update_fail_htlcs.is_empty());
7225                 assert!(updates.update_fail_malformed_htlcs.is_empty());
7226                 assert!(updates.update_fee.is_none());
7227                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
7228
7229                 nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "Payment preimage didn't match payment hash".to_string(), 1);
7230         }
7231
7232         #[test]
7233         fn test_keysend_msg_with_secret_err() {
7234                 // Test that we error as expected if we receive a keysend payment that includes a payment secret.
7235                 let chanmon_cfgs = create_chanmon_cfgs(2);
7236                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7237                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7238                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7239
7240                 let payer_pubkey = nodes[0].node.get_our_node_id();
7241                 let payee_pubkey = nodes[1].node.get_our_node_id();
7242                 nodes[0].node.peer_connected(&payee_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
7243                 nodes[1].node.peer_connected(&payer_pubkey, &msgs::Init { features: InitFeatures::known(), remote_network_address: None });
7244
7245                 let _chan = create_chan_between_nodes(&nodes[0], &nodes[1], InitFeatures::known(), InitFeatures::known());
7246                 let route_params = RouteParameters {
7247                         payment_params: PaymentParameters::for_keysend(payee_pubkey),
7248                         final_value_msat: 10000,
7249                         final_cltv_expiry_delta: 40,
7250                 };
7251                 let network_graph = nodes[0].network_graph;
7252                 let first_hops = nodes[0].node.list_usable_channels();
7253                 let scorer = test_utils::TestScorer::with_penalty(0);
7254                 let random_seed_bytes = chanmon_cfgs[1].keys_manager.get_secure_random_bytes();
7255                 let route = find_route(
7256                         &payer_pubkey, &route_params, network_graph, Some(&first_hops.iter().collect::<Vec<_>>()),
7257                         nodes[0].logger, &scorer, &random_seed_bytes
7258                 ).unwrap();
7259
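                     // Use the internal send helper so we can attach a payment secret to a keysend payment,
                     // which the recipient should reject as an unsupported MPP keysend.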
7260                 let test_preimage = PaymentPreimage([42; 32]);
7261                 let test_secret = PaymentSecret([43; 32]);
7262                 let payment_hash = PaymentHash(Sha256::hash(&test_preimage.0).into_inner());
7263                 let _ = nodes[0].node.send_payment_internal(&route, payment_hash, &Some(test_secret), Some(test_preimage), None, None).unwrap();
7264                 check_added_monitors!(nodes[0], 1);
7265
7266                 let updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
7267                 assert_eq!(updates.update_add_htlcs.len(), 1);
7268                 assert!(updates.update_fulfill_htlcs.is_empty());
7269                 assert!(updates.update_fail_htlcs.is_empty());
7270                 assert!(updates.update_fail_malformed_htlcs.is_empty());
7271                 assert!(updates.update_fee.is_none());
7272                 nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
7273
7274                 nodes[1].logger.assert_log_contains("lightning::ln::channelmanager".to_string(), "We don't support MPP keysend payments".to_string(), 1);
7275         }
7276
7277         #[test]
7278         fn test_multi_hop_missing_secret() {
7279                 let chanmon_cfgs = create_chanmon_cfgs(4);
7280                 let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
7281                 let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
7282                 let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
7283
7284                 let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
7285                 let chan_2_id = create_announced_chan_between_nodes(&nodes, 0, 2, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
7286                 let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
7287                 let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3, InitFeatures::known(), InitFeatures::known()).0.contents.short_channel_id;
7288
7289                 // Marshall an MPP route.
7290                 let (mut route, payment_hash, _, _) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
7291                 let path = route.paths[0].clone();
7292                 route.paths.push(path);
7293                 route.paths[0][0].pubkey = nodes[1].node.get_our_node_id();
7294                 route.paths[0][0].short_channel_id = chan_1_id;
7295                 route.paths[0][1].short_channel_id = chan_3_id;
7296                 route.paths[1][0].pubkey = nodes[2].node.get_our_node_id();
7297                 route.paths[1][0].short_channel_id = chan_2_id;
7298                 route.paths[1][1].short_channel_id = chan_4_id;
7299
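                     // Sending over multiple paths without a payment secret should be rejected up front, before
                     // any HTLC is added.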
7300                 match nodes[0].node.send_payment(&route, payment_hash, &None).unwrap_err() {
7301                         PaymentSendFailure::ParameterError(APIError::APIMisuseError { ref err }) => {
7302                                 assert!(regex::Regex::new(r"Payment secret is required for multi-path payments").unwrap().is_match(err))
                             },
7303                         _ => panic!("unexpected error")
7304                 }
7305         }
7306
7307         #[test]
7308         fn bad_inbound_payment_hash() {
7309                 // Add coverage for checking that a user-provided payment hash matches the payment secret.
7310                 let chanmon_cfgs = create_chanmon_cfgs(2);
7311                 let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
7312                 let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
7313                 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
7314
7315                 let (_, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]);
7316                 let payment_data = msgs::FinalOnionHopData {
7317                         payment_secret,
7318                         total_msat: 100_000,
7319                 };
7320
7321                 // Ensure that if the payment hash given to `inbound_payment::verify` differs from the original,
7322                 // payment verification fails as expected.
7323                 let mut bad_payment_hash = payment_hash.clone();
7324                 bad_payment_hash.0[0] += 1;
7325                 match inbound_payment::verify(bad_payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger) {
7326                         Ok(_) => panic!("Unexpected ok"),
7327                         Err(()) => {
7328                                 nodes[0].logger.assert_log_contains("lightning::ln::inbound_payment".to_string(), "Failing HTLC with user-generated payment_hash".to_string(), 1);
7329                         }
7330                 }
7331
7332                 // Check that using the original payment hash succeeds.
7333                 assert!(inbound_payment::verify(payment_hash, &payment_data, nodes[0].node.highest_seen_timestamp.load(Ordering::Acquire) as u64, &nodes[0].node.inbound_payment_key, &nodes[0].logger).is_ok());
7334         }
7335 }
7336
7337 #[cfg(all(any(test, feature = "_test_utils"), feature = "_bench_unstable"))]
7338 pub mod bench {
7339         use chain::Listen;
7340         use chain::chainmonitor::{ChainMonitor, Persist};
7341         use chain::keysinterface::{KeysManager, KeysInterface, InMemorySigner};
7342         use ln::channelmanager::{BestBlock, ChainParameters, ChannelManager, PaymentHash, PaymentPreimage};
7343         use ln::features::{InitFeatures, InvoiceFeatures};
7344         use ln::functional_test_utils::*;
7345         use ln::msgs::{ChannelMessageHandler, Init};
7346         use routing::network_graph::NetworkGraph;
7347         use routing::router::{PaymentParameters, get_route};
7348         use util::test_utils;
7349         use util::config::UserConfig;
7350         use util::events::{Event, MessageSendEvent, MessageSendEventsProvider};
7351
7352         use bitcoin::hashes::Hash;
7353         use bitcoin::hashes::sha256::Hash as Sha256;
7354         use bitcoin::{Block, BlockHeader, Transaction, TxOut};
7355
7356         use sync::{Arc, Mutex};
7357
7358         use test::Bencher;
7359
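             // Wraps a bare ChannelManager so the functional test macros, which expect a `node` field, can be
             // reused here.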
7360         struct NodeHolder<'a, P: Persist<InMemorySigner>> {
7361                 node: &'a ChannelManager<InMemorySigner,
7362                         &'a ChainMonitor<InMemorySigner, &'a test_utils::TestChainSource,
7363                                 &'a test_utils::TestBroadcaster, &'a test_utils::TestFeeEstimator,
7364                                 &'a test_utils::TestLogger, &'a P>,
7365                         &'a test_utils::TestBroadcaster, &'a KeysManager,
7366                         &'a test_utils::TestFeeEstimator, &'a test_utils::TestLogger>
7367         }
7368
7369         #[cfg(test)]
7370         #[bench]
7371         fn bench_sends(bench: &mut Bencher) {
7372                 bench_two_sends(bench, test_utils::TestPersister::new(), test_utils::TestPersister::new());
7373         }
7374
7375         pub fn bench_two_sends<P: Persist<InMemorySigner>>(bench: &mut Bencher, persister_a: P, persister_b: P) {
7376                 // Do a simple benchmark of sending a payment back and forth between two nodes.
7377                 // Note that this is unrealistic as each payment send will require at least two fsync
7378                 // calls per node.
7379                 let network = bitcoin::Network::Testnet;
7380                 let genesis_hash = bitcoin::blockdata::constants::genesis_block(network).header.block_hash();
7381
7382                 let tx_broadcaster = test_utils::TestBroadcaster{txn_broadcasted: Mutex::new(Vec::new()), blocks: Arc::new(Mutex::new(Vec::new()))};
7383                 let fee_estimator = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
7384
7385                 let mut config: UserConfig = Default::default();
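                     // With a minimum depth of 1, the single block connected below is enough to make the channel
                     // usable.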
7386                 config.own_channel_config.minimum_depth = 1;
7387
7388                 let logger_a = test_utils::TestLogger::with_id("node a".to_owned());
7389                 let chain_monitor_a = ChainMonitor::new(None, &tx_broadcaster, &logger_a, &fee_estimator, &persister_a);
7390                 let seed_a = [1u8; 32];
7391                 let keys_manager_a = KeysManager::new(&seed_a, 42, 42);
7392                 let node_a = ChannelManager::new(&fee_estimator, &chain_monitor_a, &tx_broadcaster, &logger_a, &keys_manager_a, config.clone(), ChainParameters {
7393                         network,
7394                         best_block: BestBlock::from_genesis(network),
7395                 });
7396                 let node_a_holder = NodeHolder { node: &node_a };
7397
7398                 let logger_b = test_utils::TestLogger::with_id("node b".to_owned());
7399                 let chain_monitor_b = ChainMonitor::new(None, &tx_broadcaster, &logger_b, &fee_estimator, &persister_b);
7400                 let seed_b = [2u8; 32];
7401                 let keys_manager_b = KeysManager::new(&seed_b, 42, 42);
7402                 let node_b = ChannelManager::new(&fee_estimator, &chain_monitor_b, &tx_broadcaster, &logger_b, &keys_manager_b, config.clone(), ChainParameters {
7403                         network,
7404                         best_block: BestBlock::from_genesis(network),
7405                 });
7406                 let node_b_holder = NodeHolder { node: &node_b };
7407
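                     // Connect the peers and drive the channel open by hand: exchange open/accept, generate and
                     // broadcast the funding transaction, then confirm it.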
7408                 node_a.peer_connected(&node_b.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
7409                 node_b.peer_connected(&node_a.get_our_node_id(), &Init { features: InitFeatures::known(), remote_network_address: None });
7410                 node_a.create_channel(node_b.get_our_node_id(), 8_000_000, 100_000_000, 42, None).unwrap();
7411                 node_b.handle_open_channel(&node_a.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_a_holder, MessageSendEvent::SendOpenChannel, node_b.get_our_node_id()));
7412                 node_a.handle_accept_channel(&node_b.get_our_node_id(), InitFeatures::known(), &get_event_msg!(node_b_holder, MessageSendEvent::SendAcceptChannel, node_a.get_our_node_id()));
7413
7414                 let tx;
7415                 if let Event::FundingGenerationReady { temporary_channel_id, output_script, .. } = get_event!(node_a_holder, Event::FundingGenerationReady) {
7416                         tx = Transaction { version: 2, lock_time: 0, input: Vec::new(), output: vec![TxOut {
7417                                 value: 8_000_000, script_pubkey: output_script,
7418                         }]};
7419                         node_a.funding_transaction_generated(&temporary_channel_id, &node_b.get_our_node_id(), tx.clone()).unwrap();
7420                 } else { panic!(); }
7421
7422                 node_b.handle_funding_created(&node_a.get_our_node_id(), &get_event_msg!(node_a_holder, MessageSendEvent::SendFundingCreated, node_b.get_our_node_id()));
7423                 node_a.handle_funding_signed(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingSigned, node_a.get_our_node_id()));
7424
7425                 assert_eq!(&tx_broadcaster.txn_broadcasted.lock().unwrap()[..], &[tx.clone()]);
7426
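                     // Mine the funding transaction in one block, satisfying the minimum depth of 1 set above.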
7427                 let block = Block {
7428                         header: BlockHeader { version: 0x20000000, prev_blockhash: genesis_hash, merkle_root: Default::default(), time: 42, bits: 42, nonce: 42 },
7429                         txdata: vec![tx],
7430                 };
7431                 Listen::block_connected(&node_a, &block, 1);
7432                 Listen::block_connected(&node_b, &block, 1);
7433
7434                 node_a.handle_funding_locked(&node_b.get_our_node_id(), &get_event_msg!(node_b_holder, MessageSendEvent::SendFundingLocked, node_a.get_our_node_id()));
7435                 let msg_events = node_a.get_and_clear_pending_msg_events();
7436                 assert_eq!(msg_events.len(), 2);
7437                 match msg_events[0] {
7438                         MessageSendEvent::SendFundingLocked { ref msg, .. } => {
7439                                 node_b.handle_funding_locked(&node_a.get_our_node_id(), msg);
7440                                 get_event_msg!(node_b_holder, MessageSendEvent::SendChannelUpdate, node_a.get_our_node_id());
7441                         },
7442                         _ => panic!(),
7443                 }
7444                 match msg_events[1] {
7445                         MessageSendEvent::SendChannelUpdate { .. } => {},
7446                         _ => panic!(),
7447                 }
7448
7449                 let dummy_graph = NetworkGraph::new(genesis_hash);
7450
7451                 let mut payment_count: u64 = 0;
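                     // Each send_payment! round trip builds a one-hop route from our usable channels (the graph
                     // is empty), sends the HTLC, completes both commitment dances, and claims the payment.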
7452                 macro_rules! send_payment {
7453                         ($node_a: expr, $node_b: expr) => {
7454                                 let usable_channels = $node_a.list_usable_channels();
7455                                 let payment_params = PaymentParameters::from_node_id($node_b.get_our_node_id())
7456                                         .with_features(InvoiceFeatures::known());
7457                                 let scorer = test_utils::TestScorer::with_penalty(0);
7458                                 let seed = [3u8; 32];
7459                                 let keys_manager = KeysManager::new(&seed, 42, 42);
7460                                 let random_seed_bytes = keys_manager.get_secure_random_bytes();
7461                                 let route = get_route(&$node_a.get_our_node_id(), &payment_params, &dummy_graph.read_only(),
7462                                         Some(&usable_channels.iter().collect::<Vec<_>>()), 10_000, TEST_FINAL_CLTV, &logger_a, &scorer, &random_seed_bytes).unwrap();
7463
7464                                 let mut payment_preimage = PaymentPreimage([0; 32]);
7465                                 payment_preimage.0[0..8].copy_from_slice(&payment_count.to_le_bytes());
7466                                 payment_count += 1;
7467                                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).into_inner());
7468                                 let payment_secret = $node_b.create_inbound_payment_for_hash(payment_hash, None, 7200).unwrap();
7469
7470                                 $node_a.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
7471                                 let payment_event = SendEvent::from_event($node_a.get_and_clear_pending_msg_events().pop().unwrap());
7472                                 $node_b.handle_update_add_htlc(&$node_a.get_our_node_id(), &payment_event.msgs[0]);
7473                                 $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &payment_event.commitment_msg);
7474                                 let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_b }, $node_a.get_our_node_id());
7475                                 $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &raa);
7476                                 $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &cs);
7477                                 $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_a }, MessageSendEvent::SendRevokeAndACK, $node_b.get_our_node_id()));
7478
7479                                 expect_pending_htlcs_forwardable!(NodeHolder { node: &$node_b });
7480                                 expect_payment_received!(NodeHolder { node: &$node_b }, payment_hash, payment_secret, 10_000);
7481                                 assert!($node_b.claim_funds(payment_preimage));
7482
7483                                 match $node_b.get_and_clear_pending_msg_events().pop().unwrap() {
7484                                         MessageSendEvent::UpdateHTLCs { node_id, updates } => {
7485                                                 assert_eq!(node_id, $node_a.get_our_node_id());
7486                                                 $node_a.handle_update_fulfill_htlc(&$node_b.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
7487                                                 $node_a.handle_commitment_signed(&$node_b.get_our_node_id(), &updates.commitment_signed);
7488                                         },
7489                                         _ => panic!("Failed to generate claim event"),
7490                                 }
7491
7492                                 let (raa, cs) = get_revoke_commit_msgs!(NodeHolder { node: &$node_a }, $node_b.get_our_node_id());
7493                                 $node_b.handle_revoke_and_ack(&$node_a.get_our_node_id(), &raa);
7494                                 $node_b.handle_commitment_signed(&$node_a.get_our_node_id(), &cs);
7495                                 $node_a.handle_revoke_and_ack(&$node_b.get_our_node_id(), &get_event_msg!(NodeHolder { node: &$node_b }, MessageSendEvent::SendRevokeAndACK, $node_a.get_our_node_id()));
7496
7497                                 expect_payment_sent!(NodeHolder { node: &$node_a }, payment_preimage);
7498                         }
7499                 }
7500
7501                 bench.iter(|| {
7502                         send_payment!(node_a, node_b);
7503                         send_payment!(node_b, node_a);
7504                 });
7505         }
7506 }