]> git.bitcoin.ninja Git - rust-lightning/blob - lightning/src/ln/channel.rs
a1af5e6da6854e30bc56b7b23e041114d9678a1b
[rust-lightning] / lightning / src / ln / channel.rs
1 // This file is Copyright its original authors, visible in version control
2 // history.
3 //
4 // This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
5 // or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
6 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
7 // You may not use this file except in accordance with one or both of these
8 // licenses.
9
10 use bitcoin::blockdata::constants::ChainHash;
11 use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
12 use bitcoin::blockdata::transaction::Transaction;
13 use bitcoin::sighash;
14 use bitcoin::sighash::EcdsaSighashType;
15 use bitcoin::consensus::encode;
16
17 use bitcoin::hashes::Hash;
18 use bitcoin::hashes::sha256::Hash as Sha256;
19 use bitcoin::hashes::sha256d::Hash as Sha256d;
20 use bitcoin::hash_types::{Txid, BlockHash};
21
22 use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
23 use bitcoin::secp256k1::{PublicKey,SecretKey};
24 use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
25 use bitcoin::secp256k1;
26
27 use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
28 use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
29 use crate::ln::msgs;
30 use crate::ln::msgs::DecodeError;
31 use crate::ln::script::{self, ShutdownScript};
32 use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
33 use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
34 use crate::ln::chan_utils;
35 use crate::ln::onion_utils::HTLCFailReason;
36 use crate::chain::BestBlock;
37 use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
38 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
39 use crate::chain::transaction::{OutPoint, TransactionData};
40 use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
41 use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
42 use crate::events::ClosureReason;
43 use crate::routing::gossip::NodeId;
44 use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
45 use crate::util::logger::{Logger, Record, WithContext};
46 use crate::util::errors::APIError;
47 use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
48 use crate::util::scid_utils::scid_from_parts;
49
50 use crate::io;
51 use crate::prelude::*;
52 use core::{cmp,mem,fmt};
53 use core::convert::TryInto;
54 use core::ops::Deref;
55 #[cfg(any(test, fuzzing, debug_assertions))]
56 use crate::sync::Mutex;
57 use crate::sign::type_resolver::ChannelSignerType;
58
59 use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};
60
61 #[cfg(test)]
62 pub struct ChannelValueStat {
63         pub value_to_self_msat: u64,
64         pub channel_value_msat: u64,
65         pub channel_reserve_msat: u64,
66         pub pending_outbound_htlcs_amount_msat: u64,
67         pub pending_inbound_htlcs_amount_msat: u64,
68         pub holding_cell_outbound_amount_msat: u64,
69         pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
70         pub counterparty_dust_limit_msat: u64,
71 }
72
73 pub struct AvailableBalances {
74         /// The amount that would go to us if we close the channel, ignoring any on-chain fees.
75         pub balance_msat: u64,
76         /// Total amount available for our counterparty to send to us.
77         pub inbound_capacity_msat: u64,
78         /// Total amount available for us to send to our counterparty.
79         pub outbound_capacity_msat: u64,
80         /// The maximum value we can assign to the next outbound HTLC
81         pub next_outbound_htlc_limit_msat: u64,
82         /// The minimum value we can assign to the next outbound HTLC
83         pub next_outbound_htlc_minimum_msat: u64,
84 }
85
86 #[derive(Debug, Clone, Copy, PartialEq)]
87 enum FeeUpdateState {
88         // Inbound states mirroring InboundHTLCState
89         RemoteAnnounced,
90         AwaitingRemoteRevokeToAnnounce,
91         // Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally
92         // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
93         // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
94         // the fee update anywhere, we can simply consider the fee update `Committed` immediately
95         // instead of setting it to AwaitingAnnouncedRemoteRevoke.
96
97         // Outbound state can only be `LocalAnnounced` or `Committed`
98         Outbound,
99 }
100
101 enum InboundHTLCRemovalReason {
102         FailRelay(msgs::OnionErrorPacket),
103         FailMalformed(([u8; 32], u16)),
104         Fulfill(PaymentPreimage),
105 }
106
107 enum InboundHTLCState {
108         /// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
109         /// update_add_htlc message for this HTLC.
110         RemoteAnnounced(PendingHTLCStatus),
111         /// Included in a received commitment_signed message (implying we've
112         /// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
113         /// state (see the example below). We have not yet included this HTLC in a
114         /// commitment_signed message because we are waiting on the remote's
115         /// aforementioned state revocation. One reason this missing remote RAA
116         /// (revoke_and_ack) blocks us from constructing a commitment_signed message
117         /// is because every time we create a new "state", i.e. every time we sign a
118         /// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
119         /// which are provided one-at-a-time in each RAA. E.g., the last RAA they
120         /// sent provided the per_commitment_point for our current commitment tx.
121         /// The other reason we should not send a commitment_signed without their RAA
122         /// is because their RAA serves to ACK our previous commitment_signed.
123         ///
124         /// Here's an example of how an HTLC could come to be in this state:
125         /// remote --> update_add_htlc(prev_htlc)   --> local
126         /// remote --> commitment_signed(prev_htlc) --> local
127         /// remote <-- revoke_and_ack               <-- local
128         /// remote <-- commitment_signed(prev_htlc) <-- local
129         /// [note that here, the remote does not respond with a RAA]
130         /// remote --> update_add_htlc(this_htlc)   --> local
131         /// remote --> commitment_signed(prev_htlc, this_htlc) --> local
132         /// Now `this_htlc` will be assigned this state. It's unable to be officially
133         /// accepted, i.e. included in a commitment_signed, because we're missing the
134         /// RAA that provides our next per_commitment_point. The per_commitment_point
135         /// is used to derive commitment keys, which are used to construct the
136         /// signatures in a commitment_signed message.
137         /// Implies AwaitingRemoteRevoke.
138         ///
139         /// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
140         AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
141         /// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
142         /// We have also included this HTLC in our latest commitment_signed and are now just waiting
143         /// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
144         /// channel (before it can then get forwarded and/or removed).
145         /// Implies AwaitingRemoteRevoke.
146         AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
147         Committed,
148         /// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
149         /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
150         /// we'll drop it.
151         /// Note that we have to keep an eye on the HTLC until we've received a broadcastable
152         /// commitment transaction without it as otherwise we'll have to force-close the channel to
153         /// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
154         /// anyway). That said, ChannelMonitor does this for us (see
155         /// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
156         /// our own local state before then, once we're sure that the next commitment_signed and
157         /// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
158         LocalRemoved(InboundHTLCRemovalReason),
159 }
160
161 struct InboundHTLCOutput {
162         htlc_id: u64,
163         amount_msat: u64,
164         cltv_expiry: u32,
165         payment_hash: PaymentHash,
166         state: InboundHTLCState,
167 }
168
169 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
170 enum OutboundHTLCState {
171         /// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
172         /// created it we would have put it in the holding cell instead). When they next revoke_and_ack
173         /// we will promote to Committed (note that they may not accept it until the next time we
174         /// revoke, but we don't really care about that:
175         ///  * they've revoked, so worst case we can announce an old state and get our (option on)
176         ///    money back (though we won't), and,
177         ///  * we'll send them a revoke when they send a commitment_signed, and since only they're
178         ///    allowed to remove it, the "can only be removed once committed on both sides" requirement
179         ///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
180         ///    we'll never get out of sync).
181         /// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
182         /// OutboundHTLCOutput's size just for a temporary bit
183         LocalAnnounced(Box<msgs::OnionPacket>),
184         Committed,
185         /// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
186         /// the change (though they'll need to revoke before we fail the payment).
187         RemoteRemoved(OutboundHTLCOutcome),
188         /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
189         /// the remote side hasn't yet revoked their previous state, which we need them to do before we
190         /// can do any backwards failing. Implies AwaitingRemoteRevoke.
191         /// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
192         /// remote revoke_and_ack on a previous state before we can do so.
193         AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
194         /// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
195         /// the remote side hasn't yet revoked their previous state, which we need them to do before we
196         /// can do any backwards failing. Implies AwaitingRemoteRevoke.
197         /// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
198         /// revoke_and_ack to drop completely.
199         AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
200 }
201
202 #[derive(Clone)]
203 #[cfg_attr(test, derive(Debug, PartialEq))]
204 enum OutboundHTLCOutcome {
205         /// LDK version 0.0.105+ will always fill in the preimage here.
206         Success(Option<PaymentPreimage>),
207         Failure(HTLCFailReason),
208 }
209
210 impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
211         fn from(o: Option<HTLCFailReason>) -> Self {
212                 match o {
213                         None => OutboundHTLCOutcome::Success(None),
214                         Some(r) => OutboundHTLCOutcome::Failure(r)
215                 }
216         }
217 }
218
219 impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
220         fn into(self) -> Option<&'a HTLCFailReason> {
221                 match self {
222                         OutboundHTLCOutcome::Success(_) => None,
223                         OutboundHTLCOutcome::Failure(ref r) => Some(r)
224                 }
225         }
226 }
227
228 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
229 struct OutboundHTLCOutput {
230         htlc_id: u64,
231         amount_msat: u64,
232         cltv_expiry: u32,
233         payment_hash: PaymentHash,
234         state: OutboundHTLCState,
235         source: HTLCSource,
236         blinding_point: Option<PublicKey>,
237         skimmed_fee_msat: Option<u64>,
238 }
239
240 /// See AwaitingRemoteRevoke ChannelState for more info
241 #[cfg_attr(test, derive(Clone, Debug, PartialEq))]
242 enum HTLCUpdateAwaitingACK {
243         AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
244                 // always outbound
245                 amount_msat: u64,
246                 cltv_expiry: u32,
247                 payment_hash: PaymentHash,
248                 source: HTLCSource,
249                 onion_routing_packet: msgs::OnionPacket,
250                 // The extra fee we're skimming off the top of this HTLC.
251                 skimmed_fee_msat: Option<u64>,
252                 blinding_point: Option<PublicKey>,
253         },
254         ClaimHTLC {
255                 payment_preimage: PaymentPreimage,
256                 htlc_id: u64,
257         },
258         FailHTLC {
259                 htlc_id: u64,
260                 err_packet: msgs::OnionErrorPacket,
261         },
262         FailMalformedHTLC {
263                 htlc_id: u64,
264                 failure_code: u16,
265                 sha256_of_onion: [u8; 32],
266         },
267 }
268
269 macro_rules! define_state_flags {
270         ($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr)),+], $extra_flags: expr) => {
271                 #[doc = $flag_type_doc]
272                 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
273                 struct $flag_type(u32);
274
275                 impl $flag_type {
276                         $(
277                                 #[doc = $flag_doc]
278                                 const $flag: $flag_type = $flag_type($value);
279                         )*
280
281                         /// All flags that apply to the specified [`ChannelState`] variant.
282                         #[allow(unused)]
283                         const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);
284
285                         #[allow(unused)]
286                         fn new() -> Self { Self(0) }
287
288                         #[allow(unused)]
289                         fn from_u32(flags: u32) -> Result<Self, ()> {
290                                 if flags & !Self::ALL.0 != 0 {
291                                         Err(())
292                                 } else {
293                                         Ok($flag_type(flags))
294                                 }
295                         }
296
297                         #[allow(unused)]
298                         fn is_empty(&self) -> bool { self.0 == 0 }
299
300                         #[allow(unused)]
301                         fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
302                 }
303
304                 impl core::ops::Not for $flag_type {
305                         type Output = Self;
306                         fn not(self) -> Self::Output { Self(!self.0) }
307                 }
308                 impl core::ops::BitOr for $flag_type {
309                         type Output = Self;
310                         fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
311                 }
312                 impl core::ops::BitOrAssign for $flag_type {
313                         fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
314                 }
315                 impl core::ops::BitAnd for $flag_type {
316                         type Output = Self;
317                         fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
318                 }
319                 impl core::ops::BitAndAssign for $flag_type {
320                         fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
321                 }
322         };
323         ($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
324                 define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
325         };
326         ($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
327                 define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);
328                 impl core::ops::BitOr<FundedStateFlags> for $flag_type {
329                         type Output = Self;
330                         fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
331                 }
332                 impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
333                         fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
334                 }
335                 impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
336                         type Output = Self;
337                         fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
338                 }
339                 impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
340                         fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
341                 }
342                 impl PartialEq<FundedStateFlags> for $flag_type {
343                         fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
344                 }
345                 impl From<FundedStateFlags> for $flag_type {
346                         fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
347                 }
348         };
349 }
350
351 /// We declare all the states/flags here together to help determine which bits are still available
352 /// to choose.
353 mod state_flags {
354         pub const OUR_INIT_SENT: u32 = 1 << 0;
355         pub const THEIR_INIT_SENT: u32 = 1 << 1;
356         pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
357         pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
358         pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
359         pub const OUR_CHANNEL_READY: u32 = 1 << 5;
360         pub const CHANNEL_READY: u32 = 1 << 6;
361         pub const PEER_DISCONNECTED: u32 = 1 << 7;
362         pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
363         pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
364         pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
365         pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
366         pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
367         pub const WAITING_FOR_BATCH: u32 = 1 << 13;
368 }
369
370 define_state_flags!(
371         "Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
372         FundedStateFlags, [
373                 ("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
374                         until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED),
375                 ("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
376                         somewhere and we should pause sending any outbound messages until they've managed to \
377                         complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS),
378                 ("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
379                         any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
380                         message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT),
381                 ("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
382                         the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT)
383         ]
384 );
385
386 define_state_flags!(
387         "Flags that only apply to [`ChannelState::NegotiatingFunding`].",
388         NegotiatingFundingFlags, [
389                 ("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
390                         OUR_INIT_SENT, state_flags::OUR_INIT_SENT),
391                 ("Indicates we have received their `open_channel`/`accept_channel` message.",
392                         THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT)
393         ]
394 );
395
396 define_state_flags!(
397         "Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
398         FUNDED_STATE, AwaitingChannelReadyFlags, [
399                 ("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
400                         `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
401                         THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY),
402                 ("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
403                         `OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
404                         OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY),
405                 ("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
406                         is being held until all channels in the batch have received `funding_signed` and have \
407                         their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH)
408         ]
409 );
410
411 define_state_flags!(
412         "Flags that only apply to [`ChannelState::ChannelReady`].",
413         FUNDED_STATE, ChannelReadyFlags, [
414                 ("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
415                         `revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
416                         messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
417                         implicit ACK, so instead we have to hold them away temporarily to be sent later.",
418                         AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE)
419         ]
420 );
421
422 #[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
423 enum ChannelState {
424         /// We are negotiating the parameters required for the channel prior to funding it.
425         NegotiatingFunding(NegotiatingFundingFlags),
426         /// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
427         /// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
428         /// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
429         FundingNegotiated,
430         /// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
431         /// funding transaction to confirm.
432         AwaitingChannelReady(AwaitingChannelReadyFlags),
433         /// Both we and our counterparty consider the funding transaction confirmed and the channel is
434         /// now operational.
435         ChannelReady(ChannelReadyFlags),
436         /// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
437         /// is about to drop us, but we store this anyway.
438         ShutdownComplete,
439 }
440
441 macro_rules! impl_state_flag {
442         ($get: ident, $set: ident, $clear: ident, $state_flag: expr, [$($state: ident),+]) => {
443                 #[allow(unused)]
444                 fn $get(&self) -> bool {
445                         match self {
446                                 $(
447                                         ChannelState::$state(flags) => flags.is_set($state_flag.into()),
448                                 )*
449                                 _ => false,
450                         }
451                 }
452                 #[allow(unused)]
453                 fn $set(&mut self) {
454                         match self {
455                                 $(
456                                         ChannelState::$state(flags) => *flags |= $state_flag,
457                                 )*
458                                 _ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
459                         }
460                 }
461                 #[allow(unused)]
462                 fn $clear(&mut self) {
463                         match self {
464                                 $(
465                                         ChannelState::$state(flags) => *flags &= !($state_flag),
466                                 )*
467                                 _ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
468                         }
469                 }
470         };
471         ($get: ident, $set: ident, $clear: ident, $state_flag: expr, FUNDED_STATES) => {
472                 impl_state_flag!($get, $set, $clear, $state_flag, [AwaitingChannelReady, ChannelReady]);
473         };
474         ($get: ident, $set: ident, $clear: ident, $state_flag: expr, $state: ident) => {
475                 impl_state_flag!($get, $set, $clear, $state_flag, [$state]);
476         };
477 }
478
479 impl ChannelState {
480         fn from_u32(state: u32) -> Result<Self, ()> {
481                 match state {
482                         state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
483                         state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
484                         val => {
485                                 if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
486                                         AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
487                                                 .map(|flags| ChannelState::AwaitingChannelReady(flags))
488                                 } else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
489                                         ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
490                                                 .map(|flags| ChannelState::ChannelReady(flags))
491                                 } else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
492                                         Ok(ChannelState::NegotiatingFunding(flags))
493                                 } else {
494                                         Err(())
495                                 }
496                         },
497                 }
498         }
499
500         fn to_u32(&self) -> u32 {
501                 match self {
502                         ChannelState::NegotiatingFunding(flags) => flags.0,
503                         ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
504                         ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
505                         ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
506                         ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
507                 }
508         }
509
510         fn is_pre_funded_state(&self) -> bool {
511                 matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
512         }
513
514         fn is_both_sides_shutdown(&self) -> bool {
515                 self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
516         }
517
518         fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
519                 match self {
520                         ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
521                         ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
522                         _ => FundedStateFlags::new(),
523                 }
524         }
525
526         fn should_force_holding_cell(&self) -> bool {
527                 match self {
528                         ChannelState::ChannelReady(flags) =>
529                                 flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) ||
530                                         flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) ||
531                                         flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
532                         _ => {
533                                 debug_assert!(false, "The holding cell is only valid within ChannelReady");
534                                 false
535                         },
536                 }
537         }
538
539         impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected,
540                 FundedStateFlags::PEER_DISCONNECTED, FUNDED_STATES);
541         impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress,
542                 FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS, FUNDED_STATES);
543         impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent,
544                 FundedStateFlags::LOCAL_SHUTDOWN_SENT, FUNDED_STATES);
545         impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent,
546                 FundedStateFlags::REMOTE_SHUTDOWN_SENT, FUNDED_STATES);
547         impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready,
548                 AwaitingChannelReadyFlags::OUR_CHANNEL_READY, AwaitingChannelReady);
549         impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready,
550                 AwaitingChannelReadyFlags::THEIR_CHANNEL_READY, AwaitingChannelReady);
551         impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch,
552                 AwaitingChannelReadyFlags::WAITING_FOR_BATCH, AwaitingChannelReady);
553         impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke,
554                 ChannelReadyFlags::AWAITING_REMOTE_REVOKE, ChannelReady);
555 }
556
557 pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;
558
559 pub const DEFAULT_MAX_HTLCS: u16 = 50;
560
561 pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
562         const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
563         const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
564         if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
565 }
566
567 #[cfg(not(test))]
568 const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
569 #[cfg(test)]
570 pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
571
572 pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;
573
574 /// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
575 /// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
576 /// although LDK 0.0.104+ enabled serialization of channels with a different value set for
577 /// `holder_max_htlc_value_in_flight_msat`.
578 pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;
579
580 /// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
581 /// `option_support_large_channel` (aka wumbo channels) is not supported.
582 /// It's 2^24 - 1.
583 pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;
584
585 /// Total bitcoin supply in satoshis.
586 pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
587
588 /// The maximum network dust limit for standard script formats. This currently represents the
589 /// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
590 /// transaction non-standard and thus refuses to relay it.
591 /// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
592 /// implementations use this value for their dust limit today.
593 pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;
594
595 /// The maximum channel dust limit we will accept from our counterparty.
596 pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;
597
598 /// The dust limit is used for both the commitment transaction outputs as well as the closing
599 /// transactions. For cooperative closing transactions, we require segwit outputs, though accept
600 /// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
601 /// In order to avoid having to concern ourselves with standardness during the closing process, we
602 /// simply require our counterparty to use a dust limit which will leave any segwit output
603 /// standard.
604 /// See <https://github.com/lightning/bolts/issues/905> for more details.
605 pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;
606
607 // Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
608 pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
609
610 /// Used to return a simple Error back to ChannelManager. Will get converted to a
611 /// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
612 /// channel_id in ChannelManager.
613 pub(super) enum ChannelError {
614         Ignore(String),
615         Warn(String),
616         Close(String),
617 }
618
619 impl fmt::Debug for ChannelError {
620         fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
621                 match self {
622                         &ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
623                         &ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
624                         &ChannelError::Close(ref e) => write!(f, "Close : {}", e),
625                 }
626         }
627 }
628
629 impl fmt::Display for ChannelError {
630         fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
631                 match self {
632                         &ChannelError::Ignore(ref e) => write!(f, "{}", e),
633                         &ChannelError::Warn(ref e) => write!(f, "{}", e),
634                         &ChannelError::Close(ref e) => write!(f, "{}", e),
635                 }
636         }
637 }
638
639 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
640         pub logger: &'a L,
641         pub peer_id: Option<PublicKey>,
642         pub channel_id: Option<ChannelId>,
643 }
644
645 impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
646         fn log(&self, mut record: Record) {
647                 record.peer_id = self.peer_id;
648                 record.channel_id = self.channel_id;
649                 self.logger.log(record)
650         }
651 }
652
653 impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
654 where L::Target: Logger {
655         pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
656         where S::Target: SignerProvider
657         {
658                 WithChannelContext {
659                         logger,
660                         peer_id: Some(context.counterparty_node_id),
661                         channel_id: Some(context.channel_id),
662                 }
663         }
664 }
665
666 macro_rules! secp_check {
667         ($res: expr, $err: expr) => {
668                 match $res {
669                         Ok(thing) => thing,
670                         Err(_) => return Err(ChannelError::Close($err)),
671                 }
672         };
673 }
674
675 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
676 /// our counterparty or not. However, we don't want to announce updates right away to avoid
677 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
678 /// our channel_update message and track the current state here.
679 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
680 #[derive(Clone, Copy, PartialEq)]
681 pub(super) enum ChannelUpdateStatus {
682         /// We've announced the channel as enabled and are connected to our peer.
683         Enabled,
684         /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
685         DisabledStaged(u8),
686         /// Our channel is live again, but we haven't announced the channel as enabled yet.
687         EnabledStaged(u8),
688         /// We've announced the channel as disabled.
689         Disabled,
690 }
691
692 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
693 #[derive(PartialEq)]
694 pub enum AnnouncementSigsState {
695         /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
696         /// we sent the last `AnnouncementSignatures`.
697         NotSent,
698         /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
699         /// This state never appears on disk - instead we write `NotSent`.
700         MessageSent,
701         /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
702         /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
703         /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
704         /// they send back a `RevokeAndACK`.
705         /// This state never appears on disk - instead we write `NotSent`.
706         Committed,
707         /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
708         /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
709         PeerReceived,
710 }
711
712 /// An enum indicating whether the local or remote side offered a given HTLC.
713 enum HTLCInitiator {
714         LocalOffered,
715         RemoteOffered,
716 }
717
718 /// An enum gathering stats on pending HTLCs, either inbound or outbound side.
719 struct HTLCStats {
720         pending_htlcs: u32,
721         pending_htlcs_value_msat: u64,
722         on_counterparty_tx_dust_exposure_msat: u64,
723         on_holder_tx_dust_exposure_msat: u64,
724         holding_cell_msat: u64,
725         on_holder_tx_holding_cell_htlcs_count: u32, // dust HTLCs *non*-included
726 }
727
728 /// An enum gathering stats on commitment transaction, either local or remote.
729 struct CommitmentStats<'a> {
730         tx: CommitmentTransaction, // the transaction info
731         feerate_per_kw: u32, // the feerate included to build the transaction
732         total_fee_sat: u64, // the total fee included in the transaction
733         num_nondust_htlcs: usize,  // the number of HTLC outputs (dust HTLCs *non*-included)
734         htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
735         local_balance_msat: u64, // local balance before fees but considering dust limits
736         remote_balance_msat: u64, // remote balance before fees but considering dust limits
737         outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
738         inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
739 }
740
741 /// Used when calculating whether we or the remote can afford an additional HTLC.
742 struct HTLCCandidate {
743         amount_msat: u64,
744         origin: HTLCInitiator,
745 }
746
747 impl HTLCCandidate {
748         fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
749                 Self {
750                         amount_msat,
751                         origin,
752                 }
753         }
754 }
755
756 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
757 /// description
758 enum UpdateFulfillFetch {
759         NewClaim {
760                 monitor_update: ChannelMonitorUpdate,
761                 htlc_value_msat: u64,
762                 msg: Option<msgs::UpdateFulfillHTLC>,
763         },
764         DuplicateClaim {},
765 }
766
767 /// The return type of get_update_fulfill_htlc_and_commit.
768 pub enum UpdateFulfillCommitFetch {
769         /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
770         /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
771         /// previously placed in the holding cell (and has since been removed).
772         NewClaim {
773                 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
774                 monitor_update: ChannelMonitorUpdate,
775                 /// The value of the HTLC which was claimed, in msat.
776                 htlc_value_msat: u64,
777         },
778         /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
779         /// or has been forgotten (presumably previously claimed).
780         DuplicateClaim {},
781 }
782
783 /// The return value of `monitor_updating_restored`
784 pub(super) struct MonitorRestoreUpdates {
785         pub raa: Option<msgs::RevokeAndACK>,
786         pub commitment_update: Option<msgs::CommitmentUpdate>,
787         pub order: RAACommitmentOrder,
788         pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
789         pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
790         pub finalized_claimed_htlcs: Vec<HTLCSource>,
791         pub funding_broadcastable: Option<Transaction>,
792         pub channel_ready: Option<msgs::ChannelReady>,
793         pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
794 }
795
796 /// The return value of `signer_maybe_unblocked`
797 #[allow(unused)]
798 pub(super) struct SignerResumeUpdates {
799         pub commitment_update: Option<msgs::CommitmentUpdate>,
800         pub funding_signed: Option<msgs::FundingSigned>,
801         pub channel_ready: Option<msgs::ChannelReady>,
802 }
803
804 /// The return value of `channel_reestablish`
805 pub(super) struct ReestablishResponses {
806         pub channel_ready: Option<msgs::ChannelReady>,
807         pub raa: Option<msgs::RevokeAndACK>,
808         pub commitment_update: Option<msgs::CommitmentUpdate>,
809         pub order: RAACommitmentOrder,
810         pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
811         pub shutdown_msg: Option<msgs::Shutdown>,
812 }
813
814 /// The result of a shutdown that should be handled.
815 #[must_use]
816 pub(crate) struct ShutdownResult {
817         /// A channel monitor update to apply.
818         pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
819         /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
820         pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
821         /// An unbroadcasted batch funding transaction id. The closure of this channel should be
822         /// propagated to the remainder of the batch.
823         pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
824         pub(crate) channel_id: ChannelId,
825         pub(crate) counterparty_node_id: PublicKey,
826 }
827
828 /// If the majority of the channels funds are to the fundee and the initiator holds only just
829 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
830 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
831 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
832 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
833 /// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
834 /// by this multiple without hitting this case, before sending.
835 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
836 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
837 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
838 /// leave the channel less usable as we hold a bigger reserve.
839 #[cfg(any(fuzzing, test))]
840 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
841 #[cfg(not(any(fuzzing, test)))]
842 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
843
844 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
845 /// channel creation on an inbound channel, we simply force-close and move on.
846 /// This constant is the one suggested in BOLT 2.
847 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
848
849 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
850 /// not have enough balance value remaining to cover the onchain cost of this new
851 /// HTLC weight. If this happens, our counterparty fails the reception of our
852 /// commitment_signed including this new HTLC due to infringement on the channel
853 /// reserve.
854 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
855 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
856 /// leads to a channel force-close. Ultimately, this is an issue coming from the
857 /// design of LN state machines, allowing asynchronous updates.
858 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
859
860 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
861 /// commitment transaction fees, with at least this many HTLCs present on the commitment
862 /// transaction (not counting the value of the HTLCs themselves).
863 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
864
865 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
866 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
867 /// ChannelUpdate prompted by the config update. This value was determined as follows:
868 ///
869 ///   * The expected interval between ticks (1 minute).
870 ///   * The average convergence delay of updates across the network, i.e., ~300 seconds on average
871 ///      for a node to see an update as seen on `<https://arxiv.org/pdf/2205.12737.pdf>`.
872 ///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
873 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
874
875 /// The number of ticks that may elapse while we're waiting for a response to a
876 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
877 /// them.
878 ///
879 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
880 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
881
882 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
883 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
884 /// exceeding this age limit will be force-closed and purged from memory.
885 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
886
887 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
888 pub(crate) const COINBASE_MATURITY: u32 = 100;
889
890 struct PendingChannelMonitorUpdate {
891         update: ChannelMonitorUpdate,
892 }
893
894 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
895         (0, update, required),
896 });
897
898 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
899 /// its variants containing an appropriate channel struct.
900 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
901         UnfundedOutboundV1(OutboundV1Channel<SP>),
902         UnfundedInboundV1(InboundV1Channel<SP>),
903         Funded(Channel<SP>),
904 }
905
906 impl<'a, SP: Deref> ChannelPhase<SP> where
907         SP::Target: SignerProvider,
908         <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
909 {
910         pub fn context(&'a self) -> &'a ChannelContext<SP> {
911                 match self {
912                         ChannelPhase::Funded(chan) => &chan.context,
913                         ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
914                         ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
915                 }
916         }
917
918         pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
919                 match self {
920                         ChannelPhase::Funded(ref mut chan) => &mut chan.context,
921                         ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
922                         ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
923                 }
924         }
925 }
926
927 /// Contains all state common to unfunded inbound/outbound channels.
928 pub(super) struct UnfundedChannelContext {
929         /// A counter tracking how many ticks have elapsed since this unfunded channel was
930         /// created. If this unfunded channel reaches peer has yet to respond after reaching
931         /// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, it will be force-closed and purged from memory.
932         ///
933         /// This is so that we don't keep channels around that haven't progressed to a funded state
934         /// in a timely manner.
935         unfunded_channel_age_ticks: usize,
936 }
937
938 impl UnfundedChannelContext {
939         /// Determines whether we should force-close and purge this unfunded channel from memory due to it
940         /// having reached the unfunded channel age limit.
941         ///
942         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
943         pub fn should_expire_unfunded_channel(&mut self) -> bool {
944                 self.unfunded_channel_age_ticks += 1;
945                 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
946         }
947 }
948
949 /// Contains everything about the channel including state, and various flags.
950 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
951         config: LegacyChannelConfig,
952
953         // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
954         // constructed using it. The second element in the tuple corresponds to the number of ticks that
955         // have elapsed since the update occurred.
956         prev_config: Option<(ChannelConfig, usize)>,
957
958         inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
959
960         user_id: u128,
961
962         /// The current channel ID.
963         channel_id: ChannelId,
964         /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
965         /// Will be `None` for channels created prior to 0.0.115.
966         temporary_channel_id: Option<ChannelId>,
967         channel_state: ChannelState,
968
969         // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
970         // our peer. However, we want to make sure they received it, or else rebroadcast it when we
971         // next connect.
972         // We do so here, see `AnnouncementSigsSent` for more details on the state(s).
973         // Note that a number of our tests were written prior to the behavior here which retransmits
974         // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
975         // many tests.
976         #[cfg(any(test, feature = "_test_utils"))]
977         pub(crate) announcement_sigs_state: AnnouncementSigsState,
978         #[cfg(not(any(test, feature = "_test_utils")))]
979         announcement_sigs_state: AnnouncementSigsState,
980
981         secp_ctx: Secp256k1<secp256k1::All>,
982         channel_value_satoshis: u64,
983
984         latest_monitor_update_id: u64,
985
986         holder_signer: ChannelSignerType<SP>,
987         shutdown_scriptpubkey: Option<ShutdownScript>,
988         destination_script: ScriptBuf,
989
990         // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
991         // generation start at 0 and count up...this simplifies some parts of implementation at the
992         // cost of others, but should really just be changed.
993
994         cur_holder_commitment_transaction_number: u64,
995         cur_counterparty_commitment_transaction_number: u64,
996         value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
997         pending_inbound_htlcs: Vec<InboundHTLCOutput>,
998         pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
999         holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1000
1001         /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1002         /// need to ensure we resend them in the order we originally generated them. Note that because
1003         /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1004         /// sufficient to simply set this to the opposite of any message we are generating as we
1005         /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
1006         /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
1007         /// send it first.
1008         resend_order: RAACommitmentOrder,
1009
1010         monitor_pending_channel_ready: bool,
1011         monitor_pending_revoke_and_ack: bool,
1012         monitor_pending_commitment_signed: bool,
1013
1014         // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1015         // responsible for some of the HTLCs here or not - we don't know whether the update in question
1016         // completed or not. We currently ignore these fields entirely when force-closing a channel,
1017         // but need to handle this somehow or we run the risk of losing HTLCs!
1018         monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1019         monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1020         monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1021
1022         /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
1023         /// but our signer (initially) refused to give us a signature, we should retry at some point in
1024         /// the future when the signer indicates it may have a signature for us.
1025         ///
1026         /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1027         /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1028         signer_pending_commitment_update: bool,
1029         /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1030         /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1031         /// outbound or inbound.
1032         signer_pending_funding: bool,
1033
1034         // pending_update_fee is filled when sending and receiving update_fee.
1035         //
1036         // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1037         // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1038         // generating new commitment transactions with exactly the same criteria as inbound/outbound
1039         // HTLCs with similar state.
1040         pending_update_fee: Option<(u32, FeeUpdateState)>,
1041         // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1042         // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1043         // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1044         // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1045         // further `send_update_fee` calls, dropping the previous holding cell update entirely.
1046         holding_cell_update_fee: Option<u32>,
1047         next_holder_htlc_id: u64,
1048         next_counterparty_htlc_id: u64,
1049         feerate_per_kw: u32,
1050
1051         /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1052         /// when the channel is updated in ways which may impact the `channel_update` message or when a
1053         /// new block is received, ensuring it's always at least moderately close to the current real
1054         /// time.
1055         update_time_counter: u32,
1056
1057         #[cfg(debug_assertions)]
1058         /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1059         holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1060         #[cfg(debug_assertions)]
1061         /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1062         counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1063
1064         last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1065         target_closing_feerate_sats_per_kw: Option<u32>,
1066
1067         /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1068         /// update, we need to delay processing it until later. We do that here by simply storing the
1069         /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1070         pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1071
1072         /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1073         /// transaction. These are set once we reach `closing_negotiation_ready`.
1074         #[cfg(test)]
1075         pub(crate) closing_fee_limits: Option<(u64, u64)>,
1076         #[cfg(not(test))]
1077         closing_fee_limits: Option<(u64, u64)>,
1078
1079         /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1080         /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1081         /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1082         /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1083         /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1084         ///
1085         /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1086         /// until we see a `commitment_signed` before doing so.
1087         ///
1088         /// We don't bother to persist this - we anticipate this state won't last longer than a few
1089         /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1090         expecting_peer_commitment_signed: bool,
1091
1092         /// The hash of the block in which the funding transaction was included.
1093         funding_tx_confirmed_in: Option<BlockHash>,
1094         funding_tx_confirmation_height: u32,
1095         short_channel_id: Option<u64>,
1096         /// Either the height at which this channel was created or the height at which it was last
1097         /// serialized if it was serialized by versions prior to 0.0.103.
1098         /// We use this to close if funding is never broadcasted.
1099         channel_creation_height: u32,
1100
1101         counterparty_dust_limit_satoshis: u64,
1102
1103         #[cfg(test)]
1104         pub(super) holder_dust_limit_satoshis: u64,
1105         #[cfg(not(test))]
1106         holder_dust_limit_satoshis: u64,
1107
1108         #[cfg(test)]
1109         pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
1110         #[cfg(not(test))]
1111         counterparty_max_htlc_value_in_flight_msat: u64,
1112
1113         #[cfg(test)]
1114         pub(super) holder_max_htlc_value_in_flight_msat: u64,
1115         #[cfg(not(test))]
1116         holder_max_htlc_value_in_flight_msat: u64,
1117
1118         /// minimum channel reserve for self to maintain - set by them.
1119         counterparty_selected_channel_reserve_satoshis: Option<u64>,
1120
1121         #[cfg(test)]
1122         pub(super) holder_selected_channel_reserve_satoshis: u64,
1123         #[cfg(not(test))]
1124         holder_selected_channel_reserve_satoshis: u64,
1125
1126         counterparty_htlc_minimum_msat: u64,
1127         holder_htlc_minimum_msat: u64,
1128         #[cfg(test)]
1129         pub counterparty_max_accepted_htlcs: u16,
1130         #[cfg(not(test))]
1131         counterparty_max_accepted_htlcs: u16,
1132         holder_max_accepted_htlcs: u16,
1133         minimum_depth: Option<u32>,
1134
1135         counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1136
1137         pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1138         funding_transaction: Option<Transaction>,
1139         is_batch_funding: Option<()>,
1140
1141         counterparty_cur_commitment_point: Option<PublicKey>,
1142         counterparty_prev_commitment_point: Option<PublicKey>,
1143         counterparty_node_id: PublicKey,
1144
1145         counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1146
1147         commitment_secrets: CounterpartyCommitmentSecrets,
1148
1149         channel_update_status: ChannelUpdateStatus,
1150         /// Once we reach `closing_negotiation_ready`, we set this, indicating if closing_signed does
1151         /// not complete within a single timer tick (one minute), we should force-close the channel.
1152         /// This prevents us from keeping unusable channels around forever if our counterparty wishes
1153         /// to DoS us.
1154         /// Note that this field is reset to false on deserialization to give us a chance to connect to
1155         /// our peer and start the closing_signed negotiation fresh.
1156         closing_signed_in_flight: bool,
1157
1158         /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1159         /// This can be used to rebroadcast the channel_announcement message later.
1160         announcement_sigs: Option<(Signature, Signature)>,
1161
1162         // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1163         // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
1164         // be, by comparing the cached values to the fee of the tranaction generated by
1165         // `build_commitment_transaction`.
1166         #[cfg(any(test, fuzzing))]
1167         next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1168         #[cfg(any(test, fuzzing))]
1169         next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1170
1171         /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1172         /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1173         /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1174         /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1175         /// message until we receive a channel_reestablish.
1176         ///
1177         /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
1178         pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1179
1180         /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1181         /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1182         /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1183         /// unblock the state machine.
1184         ///
1185         /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
1186         /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1187         /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1188         ///
1189         /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1190         /// [`msgs::RevokeAndACK`] message from the counterparty.
1191         sent_message_awaiting_response: Option<usize>,
1192
1193         #[cfg(any(test, fuzzing))]
1194         // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1195         // corresponding HTLC on the inbound path. If, then, the outbound path channel is
1196         // disconnected and reconnected (before we've exchange commitment_signed and revoke_and_ack
1197         // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1198         // is fine, but as a sanity check in our failure to generate the second claim, we check here
1199         // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1200         historical_inbound_htlc_fulfills: HashSet<u64>,
1201
1202         /// This channel's type, as negotiated during channel open
1203         channel_type: ChannelTypeFeatures,
1204
1205         // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1206         // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1207         // the channel's funding UTXO.
1208         //
1209         // We also use this when sending our peer a channel_update that isn't to be broadcasted
1210         // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1211         // associated channel mapping.
1212         //
1213         // We only bother storing the most recent SCID alias at any time, though our counterparty has
1214         // to store all of them.
1215         latest_inbound_scid_alias: Option<u64>,
1216
1217         // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1218         // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1219         // don't currently support node id aliases and eventually privacy should be provided with
1220         // blinded paths instead of simple scid+node_id aliases.
1221         outbound_scid_alias: u64,
1222
1223         // We track whether we already emitted a `ChannelPending` event.
1224         channel_pending_event_emitted: bool,
1225
1226         // We track whether we already emitted a `ChannelReady` event.
1227         channel_ready_event_emitted: bool,
1228
1229         /// The unique identifier used to re-derive the private key material for the channel through
1230         /// [`SignerProvider::derive_channel_signer`].
1231         channel_keys_id: [u8; 32],
1232
1233         /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1234         /// store it here and only release it to the `ChannelManager` once it asks for it.
1235         blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1236 }
1237
1238 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
1239         /// Allowed in any state (including after shutdown)
1240         pub fn get_update_time_counter(&self) -> u32 {
1241                 self.update_time_counter
1242         }
1243
1244         pub fn get_latest_monitor_update_id(&self) -> u64 {
1245                 self.latest_monitor_update_id
1246         }
1247
1248         pub fn should_announce(&self) -> bool {
1249                 self.config.announced_channel
1250         }
1251
1252         pub fn is_outbound(&self) -> bool {
1253                 self.channel_transaction_parameters.is_outbound_from_holder
1254         }
1255
1256         /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1257         /// Allowed in any state (including after shutdown)
1258         pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1259                 self.config.options.forwarding_fee_base_msat
1260         }
1261
1262         /// Returns true if we've ever received a message from the remote end for this Channel
1263         pub fn have_received_message(&self) -> bool {
1264                 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
1265         }
1266
1267         /// Returns true if this channel is fully established and not known to be closing.
1268         /// Allowed in any state (including after shutdown)
1269         pub fn is_usable(&self) -> bool {
1270                 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
1271                         !self.channel_state.is_local_shutdown_sent() &&
1272                         !self.channel_state.is_remote_shutdown_sent() &&
1273                         !self.monitor_pending_channel_ready
1274         }
1275
1276         /// shutdown state returns the state of the channel in its various stages of shutdown
1277         pub fn shutdown_state(&self) -> ChannelShutdownState {
1278                 match self.channel_state {
1279                         ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
1280                                 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
1281                                         ChannelShutdownState::ShutdownInitiated
1282                                 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
1283                                         ChannelShutdownState::ResolvingHTLCs
1284                                 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
1285                                         ChannelShutdownState::NegotiatingClosingFee
1286                                 } else {
1287                                         ChannelShutdownState::NotShuttingDown
1288                                 },
1289                         ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
1290                         _ => ChannelShutdownState::NotShuttingDown,
1291                 }
1292         }
1293
1294         fn closing_negotiation_ready(&self) -> bool {
1295                 let is_ready_to_close = match self.channel_state {
1296                         ChannelState::AwaitingChannelReady(flags) =>
1297                                 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1298                         ChannelState::ChannelReady(flags) =>
1299                                 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1300                         _ => false,
1301                 };
1302                 self.pending_inbound_htlcs.is_empty() &&
1303                         self.pending_outbound_htlcs.is_empty() &&
1304                         self.pending_update_fee.is_none() &&
1305                         is_ready_to_close
1306         }
1307
1308         /// Returns true if this channel is currently available for use. This is a superset of
1309         /// is_usable() and considers things like the channel being temporarily disabled.
1310         /// Allowed in any state (including after shutdown)
1311         pub fn is_live(&self) -> bool {
1312                 self.is_usable() && !self.channel_state.is_peer_disconnected()
1313         }
1314
1315         // Public utilities:
1316
1317         pub fn channel_id(&self) -> ChannelId {
1318                 self.channel_id
1319         }
1320
1321         // Return the `temporary_channel_id` used during channel establishment.
1322         //
1323         // Will return `None` for channels created prior to LDK version 0.0.115.
1324         pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1325                 self.temporary_channel_id
1326         }
1327
1328         pub fn minimum_depth(&self) -> Option<u32> {
1329                 self.minimum_depth
1330         }
1331
1332         /// Gets the "user_id" value passed into the construction of this channel. It has no special
1333         /// meaning and exists only to allow users to have a persistent identifier of a channel.
1334         pub fn get_user_id(&self) -> u128 {
1335                 self.user_id
1336         }
1337
1338         /// Gets the channel's type
1339         pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1340                 &self.channel_type
1341         }
1342
1343         /// Gets the channel's `short_channel_id`.
1344         ///
1345         /// Will return `None` if the channel hasn't been confirmed yet.
1346         pub fn get_short_channel_id(&self) -> Option<u64> {
1347                 self.short_channel_id
1348         }
1349
1350         /// Allowed in any state (including after shutdown)
1351         pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1352                 self.latest_inbound_scid_alias
1353         }
1354
1355         /// Allowed in any state (including after shutdown)
1356         pub fn outbound_scid_alias(&self) -> u64 {
1357                 self.outbound_scid_alias
1358         }
1359
1360         /// Returns the holder signer for this channel.
1361         #[cfg(test)]
1362         pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1363                 return &self.holder_signer
1364         }
1365
1366         /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
1367         /// indicating we were written by LDK prior to 0.0.106 which did not set outbound SCID aliases
1368         /// or prior to any channel actions during `Channel` initialization.
1369         pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1370                 debug_assert_eq!(self.outbound_scid_alias, 0);
1371                 self.outbound_scid_alias = outbound_scid_alias;
1372         }
1373
1374         /// Returns the funding_txo we either got from our peer, or were given by
1375         /// get_funding_created.
1376         pub fn get_funding_txo(&self) -> Option<OutPoint> {
1377                 self.channel_transaction_parameters.funding_outpoint
1378         }
1379
1380         /// Returns the height in which our funding transaction was confirmed.
1381         pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1382                 let conf_height = self.funding_tx_confirmation_height;
1383                 if conf_height > 0 {
1384                         Some(conf_height)
1385                 } else {
1386                         None
1387                 }
1388         }
1389
1390         /// Returns the block hash in which our funding transaction was confirmed.
1391         pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1392                 self.funding_tx_confirmed_in
1393         }
1394
1395         /// Returns the current number of confirmations on the funding transaction.
1396         pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1397                 if self.funding_tx_confirmation_height == 0 {
1398                         // We either haven't seen any confirmation yet, or observed a reorg.
1399                         return 0;
1400                 }
1401
1402                 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
1403         }
1404
1405         fn get_holder_selected_contest_delay(&self) -> u16 {
1406                 self.channel_transaction_parameters.holder_selected_contest_delay
1407         }
1408
1409         fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
1410                 &self.channel_transaction_parameters.holder_pubkeys
1411         }
1412
1413         pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
1414                 self.channel_transaction_parameters.counterparty_parameters
1415                         .as_ref().map(|params| params.selected_contest_delay)
1416         }
1417
1418         fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
1419                 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
1420         }
1421
1422         /// Allowed in any state (including after shutdown)
1423         pub fn get_counterparty_node_id(&self) -> PublicKey {
1424                 self.counterparty_node_id
1425         }
1426
1427         /// Allowed in any state (including after shutdown)
1428         pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
1429                 self.holder_htlc_minimum_msat
1430         }
1431
1432         /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1433         pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
1434                 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
1435         }
1436
1437         /// Allowed in any state (including after shutdown)
1438         pub fn get_announced_htlc_max_msat(&self) -> u64 {
1439                 return cmp::min(
1440                         // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
1441                         // to use full capacity. This is an effort to reduce routing failures, because in many cases
1442                         // channel might have been used to route very small values (either by honest users or as DoS).
1443                         self.channel_value_satoshis * 1000 * 9 / 10,
1444
1445                         self.counterparty_max_htlc_value_in_flight_msat
1446                 );
1447         }
1448
1449         /// Allowed in any state (including after shutdown)
1450         pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
1451                 self.counterparty_htlc_minimum_msat
1452         }
1453
1454         /// Allowed in any state (including after shutdown), but will return none before TheirInitSent
1455         pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
1456                 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
1457         }
1458
1459         fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
1460                 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
1461                         let holder_reserve = self.holder_selected_channel_reserve_satoshis;
1462                         cmp::min(
1463                                 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
1464                                 party_max_htlc_value_in_flight_msat
1465                         )
1466                 })
1467         }
1468
1469         pub fn get_value_satoshis(&self) -> u64 {
1470                 self.channel_value_satoshis
1471         }
1472
1473         pub fn get_fee_proportional_millionths(&self) -> u32 {
1474                 self.config.options.forwarding_fee_proportional_millionths
1475         }
1476
1477         pub fn get_cltv_expiry_delta(&self) -> u16 {
1478                 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
1479         }
1480
1481         pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
1482                 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
1483         where F::Target: FeeEstimator
1484         {
1485                 match self.config.options.max_dust_htlc_exposure {
1486                         MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
1487                                 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
1488                                         ConfirmationTarget::OnChainSweep) as u64;
1489                                 feerate_per_kw.saturating_mul(multiplier)
1490                         },
1491                         MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
1492                 }
1493         }
1494
1495         /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
1496         pub fn prev_config(&self) -> Option<ChannelConfig> {
1497                 self.prev_config.map(|prev_config| prev_config.0)
1498         }
1499
1500         // Checks whether we should emit a `ChannelPending` event.
1501         pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
1502                 self.is_funding_broadcast() && !self.channel_pending_event_emitted
1503         }
1504
1505         // Returns whether we already emitted a `ChannelPending` event.
1506         pub(crate) fn channel_pending_event_emitted(&self) -> bool {
1507                 self.channel_pending_event_emitted
1508         }
1509
1510         // Remembers that we already emitted a `ChannelPending` event.
1511         pub(crate) fn set_channel_pending_event_emitted(&mut self) {
1512                 self.channel_pending_event_emitted = true;
1513         }
1514
1515         // Checks whether we should emit a `ChannelReady` event.
1516         pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
1517                 self.is_usable() && !self.channel_ready_event_emitted
1518         }
1519
1520         // Remembers that we already emitted a `ChannelReady` event.
1521         pub(crate) fn set_channel_ready_event_emitted(&mut self) {
1522                 self.channel_ready_event_emitted = true;
1523         }
1524
1525         /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
1526         /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
1527         /// no longer be considered when forwarding HTLCs.
1528         pub fn maybe_expire_prev_config(&mut self) {
1529                 if self.prev_config.is_none() {
1530                         return;
1531                 }
1532                 let prev_config = self.prev_config.as_mut().unwrap();
1533                 prev_config.1 += 1;
1534                 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
1535                         self.prev_config = None;
1536                 }
1537         }
1538
1539         /// Returns the current [`ChannelConfig`] applied to the channel.
1540         pub fn config(&self) -> ChannelConfig {
1541                 self.config.options
1542         }
1543
1544         /// Updates the channel's config. A bool is returned indicating whether the config update
1545         /// applied resulted in a new ChannelUpdate message.
1546         pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
1547                 let did_channel_update =
1548                         self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
1549                         self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
1550                         self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
1551                 if did_channel_update {
1552                         self.prev_config = Some((self.config.options, 0));
1553                         // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
1554                         // policy change to propagate throughout the network.
1555                         self.update_time_counter += 1;
1556                 }
1557                 self.config.options = *config;
1558                 did_channel_update
1559         }
1560
1561         /// Returns true if funding_signed was sent/received and the
1562         /// funding transaction has been broadcast if necessary.
1563         pub fn is_funding_broadcast(&self) -> bool {
1564                 !self.channel_state.is_pre_funded_state() &&
1565                         !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
1566         }
1567
1568         /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
1569         /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
1570         /// the transaction. Thus, b will generally be sending a signature over such a transaction to
1571         /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
1572         /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
1573         /// an HTLC to a).
1574         /// @local is used only to convert relevant internal structures which refer to remote vs local
1575         /// to decide value of outputs and direction of HTLCs.
1576         /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
1577         /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
1578         /// have not yet committed it. Such HTLCs will only be included in transactions which are being
1579         /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
1580         /// which peer generated this transaction and "to whom" this transaction flows.
1581         #[inline]
1582         fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
1583                 where L::Target: Logger
1584         {
1585                 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
1586                 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
1587                 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
1588
1589                 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
1590                 let mut remote_htlc_total_msat = 0;
1591                 let mut local_htlc_total_msat = 0;
1592                 let mut value_to_self_msat_offset = 0;
1593
1594                 let mut feerate_per_kw = self.feerate_per_kw;
1595                 if let Some((feerate, update_state)) = self.pending_update_fee {
1596                         if match update_state {
1597                                 // Note that these match the inclusion criteria when scanning
1598                                 // pending_inbound_htlcs below.
1599                                 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
1600                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
1601                                 FeeUpdateState::Outbound => { assert!(self.is_outbound());  generated_by_local },
1602                         } {
1603                                 feerate_per_kw = feerate;
1604                         }
1605                 }
1606
1607                 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
1608                         commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
1609                         get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
1610                         &self.channel_id,
1611                         if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
1612
1613                 macro_rules! get_htlc_in_commitment {
1614                         ($htlc: expr, $offered: expr) => {
1615                                 HTLCOutputInCommitment {
1616                                         offered: $offered,
1617                                         amount_msat: $htlc.amount_msat,
1618                                         cltv_expiry: $htlc.cltv_expiry,
1619                                         payment_hash: $htlc.payment_hash,
1620                                         transaction_output_index: None
1621                                 }
1622                         }
1623                 }
1624
1625                 macro_rules! add_htlc_output {
1626                         ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
1627                                 if $outbound == local { // "offered HTLC output"
1628                                         let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
1629                                         let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1630                                                 0
1631                                         } else {
1632                                                 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
1633                                         };
1634                                         if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1635                                                 log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1636                                                 included_non_dust_htlcs.push((htlc_in_tx, $source));
1637                                         } else {
1638                                                 log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1639                                                 included_dust_htlcs.push((htlc_in_tx, $source));
1640                                         }
1641                                 } else {
1642                                         let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
1643                                         let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1644                                                 0
1645                                         } else {
1646                                                 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
1647                                         };
1648                                         if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
1649                                                 log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1650                                                 included_non_dust_htlcs.push((htlc_in_tx, $source));
1651                                         } else {
1652                                                 log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
1653                                                 included_dust_htlcs.push((htlc_in_tx, $source));
1654                                         }
1655                                 }
1656                         }
1657                 }
1658
1659                 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1660
1661                 for ref htlc in self.pending_inbound_htlcs.iter() {
1662                         let (include, state_name) = match htlc.state {
1663                                 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
1664                                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
1665                                 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
1666                                 InboundHTLCState::Committed => (true, "Committed"),
1667                                 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
1668                         };
1669
1670                         if include {
1671                                 add_htlc_output!(htlc, false, None, state_name);
1672                                 remote_htlc_total_msat += htlc.amount_msat;
1673                         } else {
1674                                 log_trace!(logger, "   ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1675                                 match &htlc.state {
1676                                         &InboundHTLCState::LocalRemoved(ref reason) => {
1677                                                 if generated_by_local {
1678                                                         if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
1679                                                                 inbound_htlc_preimages.push(preimage);
1680                                                                 value_to_self_msat_offset += htlc.amount_msat as i64;
1681                                                         }
1682                                                 }
1683                                         },
1684                                         _ => {},
1685                                 }
1686                         }
1687                 }
1688
1689
1690                 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
1691
1692                 for ref htlc in self.pending_outbound_htlcs.iter() {
1693                         let (include, state_name) = match htlc.state {
1694                                 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
1695                                 OutboundHTLCState::Committed => (true, "Committed"),
1696                                 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
1697                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
1698                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
1699                         };
1700
1701                         let preimage_opt = match htlc.state {
1702                                 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
1703                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
1704                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
1705                                 _ => None,
1706                         };
1707
1708                         if let Some(preimage) = preimage_opt {
1709                                 outbound_htlc_preimages.push(preimage);
1710                         }
1711
1712                         if include {
1713                                 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
1714                                 local_htlc_total_msat += htlc.amount_msat;
1715                         } else {
1716                                 log_trace!(logger, "   ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
1717                                 match htlc.state {
1718                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
1719                                                 value_to_self_msat_offset -= htlc.amount_msat as i64;
1720                                         },
1721                                         OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
1722                                                 if !generated_by_local {
1723                                                         value_to_self_msat_offset -= htlc.amount_msat as i64;
1724                                                 }
1725                                         },
1726                                         _ => {},
1727                                 }
1728                         }
1729                 }
1730
1731                 let mut value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
1732                 assert!(value_to_self_msat >= 0);
1733                 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
1734                 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
1735                 // "violate" their reserve value by couting those against it. Thus, we have to convert
1736                 // everything to i64 before subtracting as otherwise we can overflow.
1737                 let mut value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
1738                 assert!(value_to_remote_msat >= 0);
1739
1740                 #[cfg(debug_assertions)]
1741                 {
1742                         // Make sure that the to_self/to_remote is always either past the appropriate
1743                         // channel_reserve *or* it is making progress towards it.
1744                         let mut broadcaster_max_commitment_tx_output = if generated_by_local {
1745                                 self.holder_max_commitment_tx_output.lock().unwrap()
1746                         } else {
1747                                 self.counterparty_max_commitment_tx_output.lock().unwrap()
1748                         };
1749                         debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
1750                         broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
1751                         debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
1752                         broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
1753                 }
1754
1755                 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
1756                 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
1757                 let (value_to_self, value_to_remote) = if self.is_outbound() {
1758                         (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
1759                 } else {
1760                         (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
1761                 };
1762
1763                 let mut value_to_a = if local { value_to_self } else { value_to_remote };
1764                 let mut value_to_b = if local { value_to_remote } else { value_to_self };
1765                 let (funding_pubkey_a, funding_pubkey_b) = if local {
1766                         (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
1767                 } else {
1768                         (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
1769                 };
1770
1771                 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
1772                         log_trace!(logger, "   ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
1773                 } else {
1774                         value_to_a = 0;
1775                 }
1776
1777                 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
1778                         log_trace!(logger, "   ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
1779                 } else {
1780                         value_to_b = 0;
1781                 }
1782
1783                 let num_nondust_htlcs = included_non_dust_htlcs.len();
1784
1785                 let channel_parameters =
1786                         if local { self.channel_transaction_parameters.as_holder_broadcastable() }
1787                         else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
1788                 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
1789                                                                              value_to_a as u64,
1790                                                                              value_to_b as u64,
1791                                                                              funding_pubkey_a,
1792                                                                              funding_pubkey_b,
1793                                                                              keys.clone(),
1794                                                                              feerate_per_kw,
1795                                                                              &mut included_non_dust_htlcs,
1796                                                                              &channel_parameters
1797                 );
1798                 let mut htlcs_included = included_non_dust_htlcs;
1799                 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
1800                 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
1801                 htlcs_included.append(&mut included_dust_htlcs);
1802
1803                 // For the stats, trimmed-to-0 the value in msats accordingly
1804                 value_to_self_msat = if (value_to_self_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_self_msat };
1805                 value_to_remote_msat = if (value_to_remote_msat * 1000) < broadcaster_dust_limit_satoshis as i64 { 0 } else { value_to_remote_msat };
1806
1807                 CommitmentStats {
1808                         tx,
1809                         feerate_per_kw,
1810                         total_fee_sat,
1811                         num_nondust_htlcs,
1812                         htlcs_included,
1813                         local_balance_msat: value_to_self_msat as u64,
1814                         remote_balance_msat: value_to_remote_msat as u64,
1815                         inbound_htlc_preimages,
1816                         outbound_htlc_preimages,
1817                 }
1818         }
1819
1820         #[inline]
1821         /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
1822         /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
1823         /// our counterparty!)
1824         /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
1825         /// TODO Some magic rust shit to compile-time check this?
1826         fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
1827                 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
1828                 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
1829                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1830                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1831
1832                 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
1833         }
1834
1835         #[inline]
1836         /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
1837         /// will sign and send to our counterparty.
1838         /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
1839         fn build_remote_transaction_keys(&self) -> TxCreationKeys {
1840                 //TODO: Ensure that the payment_key derived here ends up in the library users' wallet as we
1841                 //may see payments to it!
1842                 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
1843                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
1844                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
1845
1846                 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
1847         }
1848
1849         /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
1850         /// pays to get_funding_redeemscript().to_v0_p2wsh()).
1851         /// Panics if called before accept_channel/InboundV1Channel::new
1852         pub fn get_funding_redeemscript(&self) -> ScriptBuf {
1853                 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
1854         }
1855
1856         fn counterparty_funding_pubkey(&self) -> &PublicKey {
1857                 &self.get_counterparty_pubkeys().funding_pubkey
1858         }
1859
1860         pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
1861                 self.feerate_per_kw
1862         }
1863
1864         pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
1865                 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
1866                 // may, at any point, increase by at least 10 sat/vB (i.e 2530 sat/kWU) or 25%,
1867                 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
1868                 // more dust balance if the feerate increases when we have several HTLCs pending
1869                 // which are near the dust limit.
1870                 let mut feerate_per_kw = self.feerate_per_kw;
1871                 // If there's a pending update fee, use it to ensure we aren't under-estimating
1872                 // potential feerate updates coming soon.
1873                 if let Some((feerate, _)) = self.pending_update_fee {
1874                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1875                 }
1876                 if let Some(feerate) = outbound_feerate_update {
1877                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
1878                 }
1879                 cmp::max(2530, feerate_per_kw * 1250 / 1000)
1880         }
1881
1882         /// Get forwarding information for the counterparty.
1883         pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
1884                 self.counterparty_forwarding_info.clone()
1885         }
1886
1887         /// Returns a HTLCStats about inbound pending htlcs
1888         fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1889                 let context = self;
1890                 let mut stats = HTLCStats {
1891                         pending_htlcs: context.pending_inbound_htlcs.len() as u32,
1892                         pending_htlcs_value_msat: 0,
1893                         on_counterparty_tx_dust_exposure_msat: 0,
1894                         on_holder_tx_dust_exposure_msat: 0,
1895                         holding_cell_msat: 0,
1896                         on_holder_tx_holding_cell_htlcs_count: 0,
1897                 };
1898
1899                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1900                         (0, 0)
1901                 } else {
1902                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1903                         (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1904                                 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1905                 };
1906                 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
1907                 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
1908                 for ref htlc in context.pending_inbound_htlcs.iter() {
1909                         stats.pending_htlcs_value_msat += htlc.amount_msat;
1910                         if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
1911                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1912                         }
1913                         if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
1914                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1915                         }
1916                 }
1917                 stats
1918         }
1919
1920         /// Returns a HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
1921         fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
1922                 let context = self;
1923                 let mut stats = HTLCStats {
1924                         pending_htlcs: context.pending_outbound_htlcs.len() as u32,
1925                         pending_htlcs_value_msat: 0,
1926                         on_counterparty_tx_dust_exposure_msat: 0,
1927                         on_holder_tx_dust_exposure_msat: 0,
1928                         holding_cell_msat: 0,
1929                         on_holder_tx_holding_cell_htlcs_count: 0,
1930                 };
1931
1932                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1933                         (0, 0)
1934                 } else {
1935                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
1936                         (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
1937                                 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
1938                 };
1939                 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
1940                 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
1941                 for ref htlc in context.pending_outbound_htlcs.iter() {
1942                         stats.pending_htlcs_value_msat += htlc.amount_msat;
1943                         if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
1944                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
1945                         }
1946                         if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
1947                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
1948                         }
1949                 }
1950
1951                 for update in context.holding_cell_htlc_updates.iter() {
1952                         if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
1953                                 stats.pending_htlcs += 1;
1954                                 stats.pending_htlcs_value_msat += amount_msat;
1955                                 stats.holding_cell_msat += amount_msat;
1956                                 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
1957                                         stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
1958                                 }
1959                                 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
1960                                         stats.on_holder_tx_dust_exposure_msat += amount_msat;
1961                                 } else {
1962                                         stats.on_holder_tx_holding_cell_htlcs_count += 1;
1963                                 }
1964                         }
1965                 }
1966                 stats
1967         }
1968
1969         /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
1970         /// Doesn't bother handling the
1971         /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
1972         /// corner case properly.
1973         pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
1974         -> AvailableBalances
1975         where F::Target: FeeEstimator
1976         {
1977                 let context = &self;
1978                 // Note that we have to handle overflow due to the above case.
1979                 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
1980                 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
1981
1982                 let mut balance_msat = context.value_to_self_msat;
1983                 for ref htlc in context.pending_inbound_htlcs.iter() {
1984                         if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
1985                                 balance_msat += htlc.amount_msat;
1986                         }
1987                 }
1988                 balance_msat -= outbound_stats.pending_htlcs_value_msat;
1989
1990                 let outbound_capacity_msat = context.value_to_self_msat
1991                                 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
1992                                 .saturating_sub(
1993                                         context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
1994
1995                 let mut available_capacity_msat = outbound_capacity_msat;
1996
1997                 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
1998                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
1999                 } else {
2000                         0
2001                 };
2002                 if context.is_outbound() {
2003                         // We should mind channel commit tx fee when computing how much of the available capacity
2004                         // can be used in the next htlc. Mirrors the logic in send_htlc.
2005                         //
2006                         // The fee depends on whether the amount we will be sending is above dust or not,
2007                         // and the answer will in turn change the amount itself â€” making it a circular
2008                         // dependency.
2009                         // This complicates the computation around dust-values, up to the one-htlc-value.
2010                         let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2011                         if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2012                                 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2013                         }
2014
2015                         let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2016                         let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2017                         let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2018                         let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2019                         if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2020                                 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2021                                 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2022                         }
2023
2024                         // We will first subtract the fee as if we were above-dust. Then, if the resulting
2025                         // value ends up being below dust, we have this fee available again. In that case,
2026                         // match the value to right-below-dust.
2027                         let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2028                                 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2029                         if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2030                                 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2031                                 debug_assert!(one_htlc_difference_msat != 0);
2032                                 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2033                                 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2034                                 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2035                         } else {
2036                                 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2037                         }
2038                 } else {
2039                         // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2040                         // sending a new HTLC won't reduce their balance below our reserve threshold.
2041                         let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2042                         if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2043                                 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2044                         }
2045
2046                         let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2047                         let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2048
2049                         let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2050                         let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2051                                 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2052
2053                         if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2054                                 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2055                                 // we've selected for them, we can only send dust HTLCs.
2056                                 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2057                         }
2058                 }
2059
2060                 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2061
2062                 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2063                 // between zero and the remaining dust exposure limit remaining OR above the dust limit.
2064                 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2065                 // send above the dust limit (as the router can always overpay to meet the dust limit).
2066                 let mut remaining_msat_below_dust_exposure_limit = None;
2067                 let mut dust_exposure_dust_limit_msat = 0;
2068                 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2069
2070                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2071                         (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2072                 } else {
2073                         let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2074                         (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2075                          context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2076                 };
2077                 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2078                 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2079                         remaining_msat_below_dust_exposure_limit =
2080                                 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2081                         dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2082                 }
2083
2084                 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2085                 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2086                         remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2087                                 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2088                                 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2089                         dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2090                 }
2091
2092                 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2093                         if available_capacity_msat < dust_exposure_dust_limit_msat {
2094                                 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2095                         } else {
2096                                 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2097                         }
2098                 }
2099
2100                 available_capacity_msat = cmp::min(available_capacity_msat,
2101                         context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2102
2103                 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2104                         available_capacity_msat = 0;
2105                 }
2106
2107                 AvailableBalances {
2108                         inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2109                                         - context.value_to_self_msat as i64
2110                                         - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2111                                         - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2112                                 0) as u64,
2113                         outbound_capacity_msat,
2114                         next_outbound_htlc_limit_msat: available_capacity_msat,
2115                         next_outbound_htlc_minimum_msat,
2116                         balance_msat,
2117                 }
2118         }
2119
2120         pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2121                 let context = &self;
2122                 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2123         }
2124
2125         /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2126         /// number of pending HTLCs that are on track to be in our next commitment tx.
2127         ///
2128         /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2129         /// `fee_spike_buffer_htlc` is `Some`.
2130         ///
2131         /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2132         /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2133         ///
2134         /// Dust HTLCs are excluded.
2135         fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2136                 let context = &self;
2137                 assert!(context.is_outbound());
2138
2139                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2140                         (0, 0)
2141                 } else {
2142                         (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2143                                 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2144                 };
2145                 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2146                 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2147
2148                 let mut addl_htlcs = 0;
2149                 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2150                 match htlc.origin {
2151                         HTLCInitiator::LocalOffered => {
2152                                 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2153                                         addl_htlcs += 1;
2154                                 }
2155                         },
2156                         HTLCInitiator::RemoteOffered => {
2157                                 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2158                                         addl_htlcs += 1;
2159                                 }
2160                         }
2161                 }
2162
2163                 let mut included_htlcs = 0;
2164                 for ref htlc in context.pending_inbound_htlcs.iter() {
2165                         if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2166                                 continue
2167                         }
2168                         // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2169                         // transaction including this HTLC if it times out before they RAA.
2170                         included_htlcs += 1;
2171                 }
2172
2173                 for ref htlc in context.pending_outbound_htlcs.iter() {
2174                         if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2175                                 continue
2176                         }
2177                         match htlc.state {
2178                                 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2179                                 OutboundHTLCState::Committed => included_htlcs += 1,
2180                                 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2181                                 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2182                                 // transaction won't be generated until they send us their next RAA, which will mean
2183                                 // dropping any HTLCs in this state.
2184                                 _ => {},
2185                         }
2186                 }
2187
2188                 for htlc in context.holding_cell_htlc_updates.iter() {
2189                         match htlc {
2190                                 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2191                                         if amount_msat / 1000 < real_dust_limit_timeout_sat {
2192                                                 continue
2193                                         }
2194                                         included_htlcs += 1
2195                                 },
2196                                 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2197                                          // ack we're guaranteed to never include them in commitment txs anymore.
2198                         }
2199                 }
2200
2201                 let num_htlcs = included_htlcs + addl_htlcs;
2202                 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2203                 #[cfg(any(test, fuzzing))]
2204                 {
2205                         let mut fee = res;
2206                         if fee_spike_buffer_htlc.is_some() {
2207                                 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2208                         }
2209                         let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2210                                 + context.holding_cell_htlc_updates.len();
2211                         let commitment_tx_info = CommitmentTxInfoCached {
2212                                 fee,
2213                                 total_pending_htlcs,
2214                                 next_holder_htlc_id: match htlc.origin {
2215                                         HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2216                                         HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2217                                 },
2218                                 next_counterparty_htlc_id: match htlc.origin {
2219                                         HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2220                                         HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2221                                 },
2222                                 feerate: context.feerate_per_kw,
2223                         };
2224                         *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2225                 }
2226                 res
2227         }
2228
2229         /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2230         /// pending HTLCs that are on track to be in their next commitment tx
2231         ///
2232         /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2233         /// `fee_spike_buffer_htlc` is `Some`.
2234         ///
2235         /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
2236         /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2237         ///
2238         /// Dust HTLCs are excluded.
2239         fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2240                 let context = &self;
2241                 assert!(!context.is_outbound());
2242
2243                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2244                         (0, 0)
2245                 } else {
2246                         (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2247                                 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2248                 };
2249                 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2250                 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2251
2252                 let mut addl_htlcs = 0;
2253                 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2254                 match htlc.origin {
2255                         HTLCInitiator::LocalOffered => {
2256                                 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2257                                         addl_htlcs += 1;
2258                                 }
2259                         },
2260                         HTLCInitiator::RemoteOffered => {
2261                                 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2262                                         addl_htlcs += 1;
2263                                 }
2264                         }
2265                 }
2266
2267                 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2268                 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
2269                 // committed outbound HTLCs, see below.
2270                 let mut included_htlcs = 0;
2271                 for ref htlc in context.pending_inbound_htlcs.iter() {
2272                         if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2273                                 continue
2274                         }
2275                         included_htlcs += 1;
2276                 }
2277
2278                 for ref htlc in context.pending_outbound_htlcs.iter() {
2279                         if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2280                                 continue
2281                         }
2282                         // We only include outbound HTLCs if it will not be included in their next commitment_signed,
2283                         // i.e. if they've responded to us with an RAA after announcement.
2284                         match htlc.state {
2285                                 OutboundHTLCState::Committed => included_htlcs += 1,
2286                                 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2287                                 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2288                                 _ => {},
2289                         }
2290                 }
2291
2292                 let num_htlcs = included_htlcs + addl_htlcs;
2293                 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2294                 #[cfg(any(test, fuzzing))]
2295                 {
2296                         let mut fee = res;
2297                         if fee_spike_buffer_htlc.is_some() {
2298                                 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2299                         }
2300                         let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2301                         let commitment_tx_info = CommitmentTxInfoCached {
2302                                 fee,
2303                                 total_pending_htlcs,
2304                                 next_holder_htlc_id: match htlc.origin {
2305                                         HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2306                                         HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2307                                 },
2308                                 next_counterparty_htlc_id: match htlc.origin {
2309                                         HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2310                                         HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2311                                 },
2312                                 feerate: context.feerate_per_kw,
2313                         };
2314                         *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2315                 }
2316                 res
2317         }
2318
2319         fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O>
2320                 where F: Fn() -> Option<O> {
2321                 match self.channel_state {
2322                         ChannelState::FundingNegotiated => f(),
2323                         ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) {
2324                                 f()
2325                         } else {
2326                                 None
2327                         },
2328                         _ => None,
2329                 }
2330         }
2331
2332         /// Returns the transaction if there is a pending funding transaction that is yet to be
2333         /// broadcast.
2334         pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
2335                 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
2336         }
2337
2338         /// Returns the transaction ID if there is a pending funding transaction that is yet to be
2339         /// broadcast.
2340         pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
2341                 self.if_unbroadcasted_funding(||
2342                         self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
2343                 )
2344         }
2345
2346         /// Returns whether the channel is funded in a batch.
2347         pub fn is_batch_funding(&self) -> bool {
2348                 self.is_batch_funding.is_some()
2349         }
2350
2351         /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
2352         /// broadcast.
2353         pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
2354                 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
2355         }
2356
2357         /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
2358         /// shutdown of this channel - no more calls into this Channel may be made afterwards except
2359         /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
2360         /// Also returns the list of payment_hashes for channels which we can safely fail backwards
2361         /// immediately (others we will have to allow to time out).
2362         pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult {
2363                 // Note that we MUST only generate a monitor update that indicates force-closure - we're
2364                 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
2365                 // being fully configured in some cases. Thus, its likely any monitor events we generate will
2366                 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
2367                 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
2368
2369                 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
2370                 // return them to fail the payment.
2371                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
2372                 let counterparty_node_id = self.get_counterparty_node_id();
2373                 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
2374                         match htlc_update {
2375                                 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
2376                                         dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
2377                                 },
2378                                 _ => {}
2379                         }
2380                 }
2381                 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
2382                         // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
2383                         // returning a channel monitor update here would imply a channel monitor update before
2384                         // we even registered the channel monitor to begin with, which is invalid.
2385                         // Thus, if we aren't actually at a point where we could conceivably broadcast the
2386                         // funding transaction, don't return a funding txo (which prevents providing the
2387                         // monitor update to the user, even if we return one).
2388                         // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
2389                         let generate_monitor_update = match self.channel_state {
2390                                 ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)|ChannelState::ShutdownComplete => true,
2391                                 _ => false,
2392                         };
2393                         if generate_monitor_update {
2394                                 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
2395                                 Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate {
2396                                         update_id: self.latest_monitor_update_id,
2397                                         updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
2398                                 }))
2399                         } else { None }
2400                 } else { None };
2401                 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
2402
2403                 self.channel_state = ChannelState::ShutdownComplete;
2404                 self.update_time_counter += 1;
2405                 ShutdownResult {
2406                         monitor_update,
2407                         dropped_outbound_htlcs,
2408                         unbroadcasted_batch_funding_txid,
2409                         channel_id: self.channel_id,
2410                         counterparty_node_id: self.counterparty_node_id,
2411                 }
2412         }
2413
2414         /// Only allowed after [`Self::channel_transaction_parameters`] is set.
2415         fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
2416                 let counterparty_keys = self.build_remote_transaction_keys();
2417                 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
2418
2419                 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
2420                 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
2421                 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
2422                         &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
2423
2424                 match &self.holder_signer {
2425                         // TODO (arik): move match into calling method for Taproot
2426                         ChannelSignerType::Ecdsa(ecdsa) => {
2427                                 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
2428                                         .map(|(signature, _)| msgs::FundingSigned {
2429                                                 channel_id: self.channel_id(),
2430                                                 signature,
2431                                                 #[cfg(taproot)]
2432                                                 partial_signature_with_nonce: None,
2433                                         })
2434                                         .ok();
2435
2436                                 if funding_signed.is_none() {
2437                                         log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
2438                                         self.signer_pending_funding = true;
2439                                 } else if self.signer_pending_funding {
2440                                         log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
2441                                         self.signer_pending_funding = false;
2442                                 }
2443
2444                                 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
2445                                 (counterparty_initial_commitment_tx, funding_signed)
2446                         },
2447                         // TODO (taproot|arik)
2448                         #[cfg(taproot)]
2449                         _ => todo!()
2450                 }
2451         }
2452 }
2453
2454 // Internal utility functions for channels
2455
2456 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
2457 /// `channel_value_satoshis` in msat, set through
2458 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
2459 ///
2460 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
2461 ///
2462 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
2463 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
2464         let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
2465                 1
2466         } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
2467                 100
2468         } else {
2469                 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
2470         };
2471         channel_value_satoshis * 10 * configured_percent
2472 }
2473
2474 /// Returns a minimum channel reserve value the remote needs to maintain,
2475 /// required by us according to the configured or default
2476 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
2477 ///
2478 /// Guaranteed to return a value no larger than channel_value_satoshis
2479 ///
2480 /// This is used both for outbound and inbound channels and has lower bound
2481 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
2482 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
2483         let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
2484         cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
2485 }
2486
2487 /// This is for legacy reasons, present for forward-compatibility.
2488 /// LDK versions older than 0.0.104 don't know how read/handle values other than default
2489 /// from storage. Hence, we use this function to not persist default values of
2490 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
2491 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
2492         let (q, _) = channel_value_satoshis.overflowing_div(100);
2493         cmp::min(channel_value_satoshis, cmp::max(q, 1000))
2494 }
2495
2496 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
2497 // Note that num_htlcs should not include dust HTLCs.
2498 #[inline]
2499 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2500         feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
2501 }
2502
2503 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
2504 // Note that num_htlcs should not include dust HTLCs.
2505 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
2506         // Note that we need to divide before multiplying to round properly,
2507         // since the lowest denomination of bitcoin on-chain is the satoshi.
2508         (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
2509 }
2510
2511 // Holder designates channel data owned for the benefit of the user client.
2512 // Counterparty designates channel data owned by the another channel participant entity.
2513 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
2514         pub context: ChannelContext<SP>,
2515 }
2516
2517 #[cfg(any(test, fuzzing))]
2518 struct CommitmentTxInfoCached {
2519         fee: u64,
2520         total_pending_htlcs: usize,
2521         next_holder_htlc_id: u64,
2522         next_counterparty_htlc_id: u64,
2523         feerate: u32,
2524 }
2525
2526 impl<SP: Deref> Channel<SP> where
2527         SP::Target: SignerProvider,
2528         <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
2529 {
2530         fn check_remote_fee<F: Deref, L: Deref>(
2531                 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
2532                 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
2533         ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
2534         {
2535                 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2536                         ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
2537                 } else {
2538                         ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
2539                 };
2540                 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
2541                 if feerate_per_kw < lower_limit {
2542                         if let Some(cur_feerate) = cur_feerate_per_kw {
2543                                 if feerate_per_kw > cur_feerate {
2544                                         log_warn!(logger,
2545                                                 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
2546                                                 cur_feerate, feerate_per_kw);
2547                                         return Ok(());
2548                                 }
2549                         }
2550                         return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
2551                 }
2552                 Ok(())
2553         }
2554
2555         #[inline]
2556         fn get_closing_scriptpubkey(&self) -> ScriptBuf {
2557                 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
2558                 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
2559                 // outside of those situations will fail.
2560                 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
2561         }
2562
2563         #[inline]
2564         fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
2565                 let mut ret =
2566                 (4 +                                                   // version
2567                  1 +                                                   // input count
2568                  36 +                                                  // prevout
2569                  1 +                                                   // script length (0)
2570                  4 +                                                   // sequence
2571                  1 +                                                   // output count
2572                  4                                                     // lock time
2573                  )*4 +                                                 // * 4 for non-witness parts
2574                 2 +                                                    // witness marker and flag
2575                 1 +                                                    // witness element count
2576                 4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
2577                 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
2578                 2*(1 + 71);                                            // two signatures + sighash type flags
2579                 if let Some(spk) = a_scriptpubkey {
2580                         ret += ((8+1) +                                    // output values and script length
2581                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2582                 }
2583                 if let Some(spk) = b_scriptpubkey {
2584                         ret += ((8+1) +                                    // output values and script length
2585                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
2586                 }
2587                 ret
2588         }
2589
2590         #[inline]
2591         fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
2592                 assert!(self.context.pending_inbound_htlcs.is_empty());
2593                 assert!(self.context.pending_outbound_htlcs.is_empty());
2594                 assert!(self.context.pending_update_fee.is_none());
2595
2596                 let mut total_fee_satoshis = proposed_total_fee_satoshis;
2597                 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
2598                 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
2599
2600                 if value_to_holder < 0 {
2601                         assert!(self.context.is_outbound());
2602                         total_fee_satoshis += (-value_to_holder) as u64;
2603                 } else if value_to_counterparty < 0 {
2604                         assert!(!self.context.is_outbound());
2605                         total_fee_satoshis += (-value_to_counterparty) as u64;
2606                 }
2607
2608                 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
2609                         value_to_counterparty = 0;
2610                 }
2611
2612                 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
2613                         value_to_holder = 0;
2614                 }
2615
2616                 assert!(self.context.shutdown_scriptpubkey.is_some());
2617                 let holder_shutdown_script = self.get_closing_scriptpubkey();
2618                 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
2619                 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
2620
2621                 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
2622                 (closing_transaction, total_fee_satoshis)
2623         }
2624
2625         fn funding_outpoint(&self) -> OutPoint {
2626                 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
2627         }
2628
2629         /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
2630         /// entirely.
2631         ///
2632         /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
2633         /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
2634         ///
2635         /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
2636         /// disconnected).
2637         pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
2638                 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
2639         where L::Target: Logger {
2640                 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
2641                 // (see equivalent if condition there).
2642                 assert!(self.context.channel_state.should_force_holding_cell());
2643                 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
2644                 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
2645                 self.context.latest_monitor_update_id = mon_update_id;
2646                 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
2647                         assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
2648                 }
2649         }
2650
2651         fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
2652                 // Either ChannelReady got set (which means it won't be unset) or there is no way any
2653                 // caller thought we could have something claimed (cause we wouldn't have accepted in an
2654                 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
2655                 // either.
2656                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2657                         panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
2658                 }
2659
2660                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2661                 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2662                 // these, but for now we just have to treat them as normal.
2663
2664                 let mut pending_idx = core::usize::MAX;
2665                 let mut htlc_value_msat = 0;
2666                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2667                         if htlc.htlc_id == htlc_id_arg {
2668                                 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
2669                                 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
2670                                         htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
2671                                 match htlc.state {
2672                                         InboundHTLCState::Committed => {},
2673                                         InboundHTLCState::LocalRemoved(ref reason) => {
2674                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2675                                                 } else {
2676                                                         log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
2677                                                         debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2678                                                 }
2679                                                 return UpdateFulfillFetch::DuplicateClaim {};
2680                                         },
2681                                         _ => {
2682                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2683                                                 // Don't return in release mode here so that we can update channel_monitor
2684                                         }
2685                                 }
2686                                 pending_idx = idx;
2687                                 htlc_value_msat = htlc.amount_msat;
2688                                 break;
2689                         }
2690                 }
2691                 if pending_idx == core::usize::MAX {
2692                         #[cfg(any(test, fuzzing))]
2693                         // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
2694                         // this is simply a duplicate claim, not previously failed and we lost funds.
2695                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2696                         return UpdateFulfillFetch::DuplicateClaim {};
2697                 }
2698
2699                 // Now update local state:
2700                 //
2701                 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
2702                 // can claim it even if the channel hits the chain before we see their next commitment.
2703                 self.context.latest_monitor_update_id += 1;
2704                 let monitor_update = ChannelMonitorUpdate {
2705                         update_id: self.context.latest_monitor_update_id,
2706                         updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
2707                                 payment_preimage: payment_preimage_arg.clone(),
2708                         }],
2709                 };
2710
2711                 if self.context.channel_state.should_force_holding_cell() {
2712                         // Note that this condition is the same as the assertion in
2713                         // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
2714                         // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
2715                         // do not not get into this branch.
2716                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
2717                                 match pending_update {
2718                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2719                                                 if htlc_id_arg == htlc_id {
2720                                                         // Make sure we don't leave latest_monitor_update_id incremented here:
2721                                                         self.context.latest_monitor_update_id -= 1;
2722                                                         #[cfg(any(test, fuzzing))]
2723                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2724                                                         return UpdateFulfillFetch::DuplicateClaim {};
2725                                                 }
2726                                         },
2727                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2728                                                 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2729                                         {
2730                                                 if htlc_id_arg == htlc_id {
2731                                                         log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
2732                                                         // TODO: We may actually be able to switch to a fulfill here, though its
2733                                                         // rare enough it may not be worth the complexity burden.
2734                                                         debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
2735                                                         return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2736                                                 }
2737                                         },
2738                                         _ => {}
2739                                 }
2740                         }
2741                         log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
2742                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
2743                                 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
2744                         });
2745                         #[cfg(any(test, fuzzing))]
2746                         self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2747                         return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2748                 }
2749                 #[cfg(any(test, fuzzing))]
2750                 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
2751
2752                 {
2753                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2754                         if let InboundHTLCState::Committed = htlc.state {
2755                         } else {
2756                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2757                                 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
2758                         }
2759                         log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
2760                         htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
2761                 }
2762
2763                 UpdateFulfillFetch::NewClaim {
2764                         monitor_update,
2765                         htlc_value_msat,
2766                         msg: Some(msgs::UpdateFulfillHTLC {
2767                                 channel_id: self.context.channel_id(),
2768                                 htlc_id: htlc_id_arg,
2769                                 payment_preimage: payment_preimage_arg,
2770                         }),
2771                 }
2772         }
2773
2774         pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
2775                 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
2776                 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
2777                         UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
2778                                 // Even if we aren't supposed to let new monitor updates with commitment state
2779                                 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
2780                                 // matter what. Sadly, to push a new monitor update which flies before others
2781                                 // already queued, we have to insert it into the pending queue and update the
2782                                 // update_ids of all the following monitors.
2783                                 if release_cs_monitor && msg.is_some() {
2784                                         let mut additional_update = self.build_commitment_no_status_check(logger);
2785                                         // build_commitment_no_status_check may bump latest_monitor_id but we want them
2786                                         // to be strictly increasing by one, so decrement it here.
2787                                         self.context.latest_monitor_update_id = monitor_update.update_id;
2788                                         monitor_update.updates.append(&mut additional_update.updates);
2789                                 } else {
2790                                         let new_mon_id = self.context.blocked_monitor_updates.get(0)
2791                                                 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
2792                                         monitor_update.update_id = new_mon_id;
2793                                         for held_update in self.context.blocked_monitor_updates.iter_mut() {
2794                                                 held_update.update.update_id += 1;
2795                                         }
2796                                         if msg.is_some() {
2797                                                 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
2798                                                 let update = self.build_commitment_no_status_check(logger);
2799                                                 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
2800                                                         update,
2801                                                 });
2802                                         }
2803                                 }
2804
2805                                 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
2806                                 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
2807                         },
2808                         UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
2809                 }
2810         }
2811
2812         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2813         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2814         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2815         /// before we fail backwards.
2816         ///
2817         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2818         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2819         /// [`ChannelError::Ignore`].
2820         pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
2821         -> Result<(), ChannelError> where L::Target: Logger {
2822                 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
2823                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
2824         }
2825
2826         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
2827         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
2828         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
2829         /// before we fail backwards.
2830         ///
2831         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
2832         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
2833         /// [`ChannelError::Ignore`].
2834         fn fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, mut force_holding_cell: bool, logger: &L)
2835         -> Result<Option<msgs::UpdateFailHTLC>, ChannelError> where L::Target: Logger {
2836                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
2837                         panic!("Was asked to fail an HTLC when channel was not in an operational state");
2838                 }
2839
2840                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
2841                 // on-chain ChannelsMonitors during block rescan. Ideally we'd figure out a way to drop
2842                 // these, but for now we just have to treat them as normal.
2843
2844                 let mut pending_idx = core::usize::MAX;
2845                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
2846                         if htlc.htlc_id == htlc_id_arg {
2847                                 match htlc.state {
2848                                         InboundHTLCState::Committed => {},
2849                                         InboundHTLCState::LocalRemoved(ref reason) => {
2850                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
2851                                                 } else {
2852                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2853                                                 }
2854                                                 return Ok(None);
2855                                         },
2856                                         _ => {
2857                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
2858                                                 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
2859                                         }
2860                                 }
2861                                 pending_idx = idx;
2862                         }
2863                 }
2864                 if pending_idx == core::usize::MAX {
2865                         #[cfg(any(test, fuzzing))]
2866                         // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
2867                         // is simply a duplicate fail, not previously failed and we failed-back too early.
2868                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2869                         return Ok(None);
2870                 }
2871
2872                 if self.context.channel_state.should_force_holding_cell() {
2873                         debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
2874                         force_holding_cell = true;
2875                 }
2876
2877                 // Now update local state:
2878                 if force_holding_cell {
2879                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
2880                                 match pending_update {
2881                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2882                                                 if htlc_id_arg == htlc_id {
2883                                                         #[cfg(any(test, fuzzing))]
2884                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
2885                                                         return Ok(None);
2886                                                 }
2887                                         },
2888                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
2889                                                 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
2890                                         {
2891                                                 if htlc_id_arg == htlc_id {
2892                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
2893                                                         return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
2894                                                 }
2895                                         },
2896                                         _ => {}
2897                                 }
2898                         }
2899                         log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
2900                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::FailHTLC {
2901                                 htlc_id: htlc_id_arg,
2902                                 err_packet,
2903                         });
2904                         return Ok(None);
2905                 }
2906
2907                 log_trace!(logger, "Failing HTLC ID {} back with a update_fail_htlc message in channel {}.", htlc_id_arg, &self.context.channel_id());
2908                 {
2909                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
2910                         htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(err_packet.clone()));
2911                 }
2912
2913                 Ok(Some(msgs::UpdateFailHTLC {
2914                         channel_id: self.context.channel_id(),
2915                         htlc_id: htlc_id_arg,
2916                         reason: err_packet
2917                 }))
2918         }
2919
2920         // Message handlers:
2921         /// Updates the state of the channel to indicate that all channels in the batch have received
2922         /// funding_signed and persisted their monitors.
2923         /// The funding transaction is consequently allowed to be broadcast, and the channel can be
2924         /// treated as a non-batch channel going forward.
2925         pub fn set_batch_ready(&mut self) {
2926                 self.context.is_batch_funding = None;
2927                 self.context.channel_state.clear_waiting_for_batch();
2928         }
2929
2930         /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
2931         /// and the channel is now usable (and public), this may generate an announcement_signatures to
2932         /// reply with.
2933         pub fn channel_ready<NS: Deref, L: Deref>(
2934                 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
2935                 user_config: &UserConfig, best_block: &BestBlock, logger: &L
2936         ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
2937         where
2938                 NS::Target: NodeSigner,
2939                 L::Target: Logger
2940         {
2941                 if self.context.channel_state.is_peer_disconnected() {
2942                         self.context.workaround_lnd_bug_4006 = Some(msg.clone());
2943                         return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
2944                 }
2945
2946                 if let Some(scid_alias) = msg.short_channel_id_alias {
2947                         if Some(scid_alias) != self.context.short_channel_id {
2948                                 // The scid alias provided can be used to route payments *from* our counterparty,
2949                                 // i.e. can be used for inbound payments and provided in invoices, but is not used
2950                                 // when routing outbound payments.
2951                                 self.context.latest_inbound_scid_alias = Some(scid_alias);
2952                         }
2953                 }
2954
2955                 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
2956                 // batch, but we can receive channel_ready messages.
2957                 let mut check_reconnection = false;
2958                 match &self.context.channel_state {
2959                         ChannelState::AwaitingChannelReady(flags) => {
2960                                 let flags = *flags & !FundedStateFlags::ALL;
2961                                 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
2962                                 if flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
2963                                         // If we reconnected before sending our `channel_ready` they may still resend theirs.
2964                                         check_reconnection = true;
2965                                 } else if (flags & !AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
2966                                         self.context.channel_state.set_their_channel_ready();
2967                                 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
2968                                         self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
2969                                         self.context.update_time_counter += 1;
2970                                 } else {
2971                                         // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
2972                                         debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
2973                                 }
2974                         }
2975                         // If we reconnected before sending our `channel_ready` they may still resend theirs.
2976                         ChannelState::ChannelReady(_) => check_reconnection = true,
2977                         _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
2978                 }
2979                 if check_reconnection {
2980                         // They probably disconnected/reconnected and re-sent the channel_ready, which is
2981                         // required, or they're sending a fresh SCID alias.
2982                         let expected_point =
2983                                 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
2984                                         // If they haven't ever sent an updated point, the point they send should match
2985                                         // the current one.
2986                                         self.context.counterparty_cur_commitment_point
2987                                 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
2988                                         // If we've advanced the commitment number once, the second commitment point is
2989                                         // at `counterparty_prev_commitment_point`, which is not yet revoked.
2990                                         debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
2991                                         self.context.counterparty_prev_commitment_point
2992                                 } else {
2993                                         // If they have sent updated points, channel_ready is always supposed to match
2994                                         // their "first" point, which we re-derive here.
2995                                         Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
2996                                                         &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
2997                                                 ).expect("We already advanced, so previous secret keys should have been validated already")))
2998                                 };
2999                         if expected_point != Some(msg.next_per_commitment_point) {
3000                                 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
3001                         }
3002                         return Ok(None);
3003                 }
3004
3005                 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3006                 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3007
3008                 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3009
3010                 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
3011         }
3012
3013         pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3014                 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3015                 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3016         ) -> Result<(), ChannelError>
3017         where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3018                 FE::Target: FeeEstimator, L::Target: Logger,
3019         {
3020                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3021                         return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3022                 }
3023                 // We can't accept HTLCs sent after we've sent a shutdown.
3024                 if self.context.channel_state.is_local_shutdown_sent() {
3025                         pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3026                 }
3027                 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3028                 if self.context.channel_state.is_remote_shutdown_sent() {
3029                         return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3030                 }
3031                 if self.context.channel_state.is_peer_disconnected() {
3032                         return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3033                 }
3034                 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3035                         return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3036                 }
3037                 if msg.amount_msat == 0 {
3038                         return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3039                 }
3040                 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3041                         return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3042                 }
3043
3044                 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3045                 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3046                 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3047                         return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3048                 }
3049                 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3050                         return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3051                 }
3052
3053                 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
3054                 // the reserve_satoshis we told them to always have as direct payment so that they lose
3055                 // something if we punish them for broadcasting an old state).
3056                 // Note that we don't really care about having a small/no to_remote output in our local
3057                 // commitment transactions, as the purpose of the channel reserve is to ensure we can
3058                 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
3059                 // present in the next commitment transaction we send them (at least for fulfilled ones,
3060                 // failed ones won't modify value_to_self).
3061                 // Note that we will send HTLCs which another instance of rust-lightning would think
3062                 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
3063                 // Channel state once they will not be present in the next received commitment
3064                 // transaction).
3065                 let mut removed_outbound_total_msat = 0;
3066                 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3067                         if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3068                                 removed_outbound_total_msat += htlc.amount_msat;
3069                         } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3070                                 removed_outbound_total_msat += htlc.amount_msat;
3071                         }
3072                 }
3073
3074                 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3075                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3076                         (0, 0)
3077                 } else {
3078                         let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3079                         (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3080                                 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
3081                 };
3082                 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3083                 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3084                         let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3085                         if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3086                                 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3087                                         on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3088                                 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3089                         }
3090                 }
3091
3092                 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3093                 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3094                         let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3095                         if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3096                                 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3097                                         on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3098                                 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3099                         }
3100                 }
3101
3102                 let pending_value_to_self_msat =
3103                         self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3104                 let pending_remote_value_msat =
3105                         self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
3106                 if pending_remote_value_msat < msg.amount_msat {
3107                         return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3108                 }
3109
3110                 // Check that the remote can afford to pay for this HTLC on-chain at the current
3111                 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3112                 {
3113                         let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3114                                 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3115                                 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3116                         };
3117                         let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3118                                 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3119                         } else {
3120                                 0
3121                         };
3122                         if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3123                                 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3124                         };
3125                         if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3126                                 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3127                         }
3128                 }
3129
3130                 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3131                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3132                 } else {
3133                         0
3134                 };
3135                 if !self.context.is_outbound() {
3136                         // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3137                         // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3138                         // side, only on the sender's. Note that with anchor outputs we are no longer as
3139                         // sensitive to fee spikes, so we need to account for them.
3140                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3141                         let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3142                         if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3143                                 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3144                         }
3145                         if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3146                                 // Note that if the pending_forward_status is not updated here, then it's because we're already failing
3147                                 // the HTLC, i.e. its status is already set to failing.
3148                                 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3149                                 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3150                         }
3151                 } else {
3152                         // Check that they won't violate our local required channel reserve by adding this HTLC.
3153                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3154                         let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3155                         if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3156                                 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3157                         }
3158                 }
3159                 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3160                         return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3161                 }
3162                 if msg.cltv_expiry >= 500000000 {
3163                         return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3164                 }
3165
3166                 if self.context.channel_state.is_local_shutdown_sent() {
3167                         if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3168                                 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3169                         }
3170                 }
3171
3172                 // Now update local state:
3173                 self.context.next_counterparty_htlc_id += 1;
3174                 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3175                         htlc_id: msg.htlc_id,
3176                         amount_msat: msg.amount_msat,
3177                         payment_hash: msg.payment_hash,
3178                         cltv_expiry: msg.cltv_expiry,
3179                         state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3180                 });
3181                 Ok(())
3182         }
3183
3184         /// Marks an outbound HTLC which we have received update_fail/fulfill/malformed
3185         #[inline]
3186         fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3187                 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3188                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3189                         if htlc.htlc_id == htlc_id {
3190                                 let outcome = match check_preimage {
3191                                         None => fail_reason.into(),
3192                                         Some(payment_preimage) => {
3193                                                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3194                                                 if payment_hash != htlc.payment_hash {
3195                                                         return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3196                                                 }
3197                                                 OutboundHTLCOutcome::Success(Some(payment_preimage))
3198                                         }
3199                                 };
3200                                 match htlc.state {
3201                                         OutboundHTLCState::LocalAnnounced(_) =>
3202                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3203                                         OutboundHTLCState::Committed => {
3204                                                 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3205                                         },
3206                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3207                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3208                                 }
3209                                 return Ok(htlc);
3210                         }
3211                 }
3212                 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
3213         }
3214
3215         pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64), ChannelError> {
3216                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3217                         return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
3218                 }
3219                 if self.context.channel_state.is_peer_disconnected() {
3220                         return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
3221                 }
3222
3223                 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat))
3224         }
3225
3226         pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3227                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3228                         return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
3229                 }
3230                 if self.context.channel_state.is_peer_disconnected() {
3231                         return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
3232                 }
3233
3234                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3235                 Ok(())
3236         }
3237
3238         pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
3239                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3240                         return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
3241                 }
3242                 if self.context.channel_state.is_peer_disconnected() {
3243                         return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
3244                 }
3245
3246                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
3247                 Ok(())
3248         }
3249
3250         pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
3251                 where L::Target: Logger
3252         {
3253                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3254                         return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
3255                 }
3256                 if self.context.channel_state.is_peer_disconnected() {
3257                         return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
3258                 }
3259                 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3260                         return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
3261                 }
3262
3263                 let funding_script = self.context.get_funding_redeemscript();
3264
3265                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3266
3267                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
3268                 let commitment_txid = {
3269                         let trusted_tx = commitment_stats.tx.trust();
3270                         let bitcoin_tx = trusted_tx.built_transaction();
3271                         let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
3272
3273                         log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
3274                                 log_bytes!(msg.signature.serialize_compact()[..]),
3275                                 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
3276                                 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
3277                         if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
3278                                 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
3279                         }
3280                         bitcoin_tx.txid
3281                 };
3282                 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
3283
3284                 // If our counterparty updated the channel fee in this commitment transaction, check that
3285                 // they can actually afford the new fee now.
3286                 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
3287                         update_state == FeeUpdateState::RemoteAnnounced
3288                 } else { false };
3289                 if update_fee {
3290                         debug_assert!(!self.context.is_outbound());
3291                         let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
3292                         if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
3293                                 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
3294                         }
3295                 }
3296                 #[cfg(any(test, fuzzing))]
3297                 {
3298                         if self.context.is_outbound() {
3299                                 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
3300                                 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3301                                 if let Some(info) = projected_commit_tx_info {
3302                                         let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
3303                                                 + self.context.holding_cell_htlc_updates.len();
3304                                         if info.total_pending_htlcs == total_pending_htlcs
3305                                                 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
3306                                                 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
3307                                                 && info.feerate == self.context.feerate_per_kw {
3308                                                         assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
3309                                                 }
3310                                 }
3311                         }
3312                 }
3313
3314                 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
3315                         return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
3316                 }
3317
3318                 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
3319                 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
3320                 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
3321                 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
3322                 // backwards compatibility, we never use it in production. To provide test coverage, here,
3323                 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
3324                 #[allow(unused_assignments, unused_mut)]
3325                 let mut separate_nondust_htlc_sources = false;
3326                 #[cfg(all(feature = "std", any(test, fuzzing)))] {
3327                         use core::hash::{BuildHasher, Hasher};
3328                         // Get a random value using the only std API to do so - the DefaultHasher
3329                         let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
3330                         separate_nondust_htlc_sources = rand_val % 2 == 0;
3331                 }
3332
3333                 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
3334                 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
3335                 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
3336                         if let Some(_) = htlc.transaction_output_index {
3337                                 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
3338                                         self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
3339                                         &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
3340
3341                                 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
3342                                 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
3343                                 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
3344                                 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
3345                                         log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
3346                                         encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
3347                                 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
3348                                         return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
3349                                 }
3350                                 if !separate_nondust_htlc_sources {
3351                                         htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
3352                                 }
3353                         } else {
3354                                 htlcs_and_sigs.push((htlc, None, source_opt.take()));
3355                         }
3356                         if separate_nondust_htlc_sources {
3357                                 if let Some(source) = source_opt.take() {
3358                                         nondust_htlc_sources.push(source);
3359                                 }
3360                         }
3361                         debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
3362                 }
3363
3364                 let holder_commitment_tx = HolderCommitmentTransaction::new(
3365                         commitment_stats.tx,
3366                         msg.signature,
3367                         msg.htlc_signatures.clone(),
3368                         &self.context.get_holder_pubkeys().funding_pubkey,
3369                         self.context.counterparty_funding_pubkey()
3370                 );
3371
3372                 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
3373                         .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
3374
3375                 // Update state now that we've passed all the can-fail calls...
3376                 let mut need_commitment = false;
3377                 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
3378                         if *update_state == FeeUpdateState::RemoteAnnounced {
3379                                 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
3380                                 need_commitment = true;
3381                         }
3382                 }
3383
3384                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
3385                         let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
3386                                 Some(forward_info.clone())
3387                         } else { None };
3388                         if let Some(forward_info) = new_forward {
3389                                 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
3390                                         &htlc.payment_hash, &self.context.channel_id);
3391                                 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
3392                                 need_commitment = true;
3393                         }
3394                 }
3395                 let mut claimed_htlcs = Vec::new();
3396                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3397                         if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
3398                                 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
3399                                         &htlc.payment_hash, &self.context.channel_id);
3400                                 // Grab the preimage, if it exists, instead of cloning
3401                                 let mut reason = OutboundHTLCOutcome::Success(None);
3402                                 mem::swap(outcome, &mut reason);
3403                                 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
3404                                         // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
3405                                         // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
3406                                         // have a `Success(None)` reason. In this case we could forget some HTLC
3407                                         // claims, but such an upgrade is unlikely and including claimed HTLCs here
3408                                         // fixes a bug which the user was exposed to on 0.0.104 when they started the
3409                                         // claim anyway.
3410                                         claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
3411                                 }
3412                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
3413                                 need_commitment = true;
3414                         }
3415                 }
3416
3417                 self.context.latest_monitor_update_id += 1;
3418                 let mut monitor_update = ChannelMonitorUpdate {
3419                         update_id: self.context.latest_monitor_update_id,
3420                         updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
3421                                 commitment_tx: holder_commitment_tx,
3422                                 htlc_outputs: htlcs_and_sigs,
3423                                 claimed_htlcs,
3424                                 nondust_htlc_sources,
3425                         }]
3426                 };
3427
3428                 self.context.cur_holder_commitment_transaction_number -= 1;
3429                 self.context.expecting_peer_commitment_signed = false;
3430                 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
3431                 // build_commitment_no_status_check() next which will reset this to RAAFirst.
3432                 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
3433
3434                 if self.context.channel_state.is_monitor_update_in_progress() {
3435                         // In case we initially failed monitor updating without requiring a response, we need
3436                         // to make sure the RAA gets sent first.
3437                         self.context.monitor_pending_revoke_and_ack = true;
3438                         if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3439                                 // If we were going to send a commitment_signed after the RAA, go ahead and do all
3440                                 // the corresponding HTLC status updates so that
3441                                 // get_last_commitment_update_for_send includes the right HTLCs.
3442                                 self.context.monitor_pending_commitment_signed = true;
3443                                 let mut additional_update = self.build_commitment_no_status_check(logger);
3444                                 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3445                                 // strictly increasing by one, so decrement it here.
3446                                 self.context.latest_monitor_update_id = monitor_update.update_id;
3447                                 monitor_update.updates.append(&mut additional_update.updates);
3448                         }
3449                         log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
3450                                 &self.context.channel_id);
3451                         return Ok(self.push_ret_blockable_mon_update(monitor_update));
3452                 }
3453
3454                 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
3455                         // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
3456                         // we'll send one right away when we get the revoke_and_ack when we
3457                         // free_holding_cell_htlcs().
3458                         let mut additional_update = self.build_commitment_no_status_check(logger);
3459                         // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3460                         // strictly increasing by one, so decrement it here.
3461                         self.context.latest_monitor_update_id = monitor_update.update_id;
3462                         monitor_update.updates.append(&mut additional_update.updates);
3463                         true
3464                 } else { false };
3465
3466                 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
3467                         &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
3468                 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
3469                 return Ok(self.push_ret_blockable_mon_update(monitor_update));
3470         }
3471
3472         /// Public version of the below, checking relevant preconditions first.
3473         /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
3474         /// returns `(None, Vec::new())`.
3475         pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
3476                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3477         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3478         where F::Target: FeeEstimator, L::Target: Logger
3479         {
3480                 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && !self.context.channel_state.should_force_holding_cell() {
3481                         self.free_holding_cell_htlcs(fee_estimator, logger)
3482                 } else { (None, Vec::new()) }
3483         }
3484
3485         /// Frees any pending commitment updates in the holding cell, generating the relevant messages
3486         /// for our counterparty.
3487         fn free_holding_cell_htlcs<F: Deref, L: Deref>(
3488                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3489         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
3490         where F::Target: FeeEstimator, L::Target: Logger
3491         {
3492                 assert!(!self.context.channel_state.is_monitor_update_in_progress());
3493                 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
3494                         log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
3495                                 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
3496
3497                         let mut monitor_update = ChannelMonitorUpdate {
3498                                 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
3499                                 updates: Vec::new(),
3500                         };
3501
3502                         let mut htlc_updates = Vec::new();
3503                         mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
3504                         let mut update_add_count = 0;
3505                         let mut update_fulfill_count = 0;
3506                         let mut update_fail_count = 0;
3507                         let mut htlcs_to_fail = Vec::new();
3508                         for htlc_update in htlc_updates.drain(..) {
3509                                 // Note that this *can* fail, though it should be due to rather-rare conditions on
3510                                 // fee races with adding too many outputs which push our total payments just over
3511                                 // the limit. In case it's less rare than I anticipate, we may want to revisit
3512                                 // handling this case better and maybe fulfilling some of the HTLCs while attempting
3513                                 // to rebalance channels.
3514                                 match &htlc_update {
3515                                         &HTLCUpdateAwaitingACK::AddHTLC {
3516                                                 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
3517                                                 skimmed_fee_msat, blinding_point, ..
3518                                         } => {
3519                                                 match self.send_htlc(
3520                                                         amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
3521                                                         false, skimmed_fee_msat, blinding_point, fee_estimator, logger
3522                                                 ) {
3523                                                         Ok(_) => update_add_count += 1,
3524                                                         Err(e) => {
3525                                                                 match e {
3526                                                                         ChannelError::Ignore(ref msg) => {
3527                                                                                 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
3528                                                                                 // If we fail to send here, then this HTLC should
3529                                                                                 // be failed backwards. Failing to send here
3530                                                                                 // indicates that this HTLC may keep being put back
3531                                                                                 // into the holding cell without ever being
3532                                                                                 // successfully forwarded/failed/fulfilled, causing
3533                                                                                 // our counterparty to eventually close on us.
3534                                                                                 htlcs_to_fail.push((source.clone(), *payment_hash));
3535                                                                         },
3536                                                                         _ => {
3537                                                                                 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
3538                                                                         },
3539                                                                 }
3540                                                         }
3541                                                 }
3542                                         },
3543                                         &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
3544                                                 // If an HTLC claim was previously added to the holding cell (via
3545                                                 // `get_update_fulfill_htlc`, then generating the claim message itself must
3546                                                 // not fail - any in between attempts to claim the HTLC will have resulted
3547                                                 // in it hitting the holding cell again and we cannot change the state of a
3548                                                 // holding cell HTLC from fulfill to anything else.
3549                                                 let mut additional_monitor_update =
3550                                                         if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
3551                                                                 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
3552                                                         { monitor_update } else { unreachable!() };
3553                                                 update_fulfill_count += 1;
3554                                                 monitor_update.updates.append(&mut additional_monitor_update.updates);
3555                                         },
3556                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
3557                                                 match self.fail_htlc(htlc_id, err_packet.clone(), false, logger) {
3558                                                         Ok(update_fail_msg_option) => {
3559                                                                 // If an HTLC failure was previously added to the holding cell (via
3560                                                                 // `queue_fail_htlc`) then generating the fail message itself must
3561                                                                 // not fail - we should never end up in a state where we double-fail
3562                                                                 // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
3563                                                                 // for a full revocation before failing.
3564                                                                 debug_assert!(update_fail_msg_option.is_some());
3565                                                                 update_fail_count += 1;
3566                                                         },
3567                                                         Err(e) => {
3568                                                                 if let ChannelError::Ignore(_) = e {}
3569                                                                 else {
3570                                                                         panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
3571                                                                 }
3572                                                         }
3573                                                 }
3574                                         },
3575                                         &HTLCUpdateAwaitingACK::FailMalformedHTLC { .. } => {
3576                                                 todo!()
3577                                         },
3578                                 }
3579                         }
3580                         if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
3581                                 return (None, htlcs_to_fail);
3582                         }
3583                         let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
3584                                 self.send_update_fee(feerate, false, fee_estimator, logger)
3585                         } else {
3586                                 None
3587                         };
3588
3589                         let mut additional_update = self.build_commitment_no_status_check(logger);
3590                         // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
3591                         // but we want them to be strictly increasing by one, so reset it here.
3592                         self.context.latest_monitor_update_id = monitor_update.update_id;
3593                         monitor_update.updates.append(&mut additional_update.updates);
3594
3595                         log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
3596                                 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
3597                                 update_add_count, update_fulfill_count, update_fail_count);
3598
3599                         self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
3600                         (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
3601                 } else {
3602                         (None, Vec::new())
3603                 }
3604         }
3605
3606         /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
3607         /// commitment_signed message here in case we had pending outbound HTLCs to add which were
3608         /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
3609         /// generating an appropriate error *after* the channel state has been updated based on the
3610         /// revoke_and_ack message.
3611         pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
3612                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
3613         ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
3614         where F::Target: FeeEstimator, L::Target: Logger,
3615         {
3616                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3617                         return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
3618                 }
3619                 if self.context.channel_state.is_peer_disconnected() {
3620                         return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
3621                 }
3622                 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
3623                         return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
3624                 }
3625
3626                 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
3627
3628                 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
3629                         if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
3630                                 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
3631                         }
3632                 }
3633
3634                 if !self.context.channel_state.is_awaiting_remote_revoke() {
3635                         // Our counterparty seems to have burned their coins to us (by revoking a state when we
3636                         // haven't given them a new commitment transaction to broadcast). We should probably
3637                         // take advantage of this by updating our channel monitor, sending them an error, and
3638                         // waiting for them to broadcast their latest (now-revoked claim). But, that would be a
3639                         // lot of work, and there's some chance this is all a misunderstanding anyway.
3640                         // We have to do *something*, though, since our signer may get mad at us for otherwise
3641                         // jumping a remote commitment number, so best to just force-close and move on.
3642                         return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
3643                 }
3644
3645                 #[cfg(any(test, fuzzing))]
3646                 {
3647                         *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
3648                         *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
3649                 }
3650
3651                 match &self.context.holder_signer {
3652                         ChannelSignerType::Ecdsa(ecdsa) => {
3653                                 ecdsa.validate_counterparty_revocation(
3654                                         self.context.cur_counterparty_commitment_transaction_number + 1,
3655                                         &secret
3656                                 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
3657                         },
3658                         // TODO (taproot|arik)
3659                         #[cfg(taproot)]
3660                         _ => todo!()
3661                 };
3662
3663                 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
3664                         .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
3665                 self.context.latest_monitor_update_id += 1;
3666                 let mut monitor_update = ChannelMonitorUpdate {
3667                         update_id: self.context.latest_monitor_update_id,
3668                         updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
3669                                 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
3670                                 secret: msg.per_commitment_secret,
3671                         }],
3672                 };
3673
3674                 // Update state now that we've passed all the can-fail calls...
3675                 // (note that we may still fail to generate the new commitment_signed message, but that's
3676                 // OK, we step the channel here and *then* if the new generation fails we can fail the
3677                 // channel based on that, but stepping stuff here should be safe either way.
3678                 self.context.channel_state.clear_awaiting_remote_revoke();
3679                 self.context.sent_message_awaiting_response = None;
3680                 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3681                 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3682                 self.context.cur_counterparty_commitment_transaction_number -= 1;
3683
3684                 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3685                         self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
3686                 }
3687
3688                 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
3689                 let mut to_forward_infos = Vec::new();
3690                 let mut revoked_htlcs = Vec::new();
3691                 let mut finalized_claimed_htlcs = Vec::new();
3692                 let mut update_fail_htlcs = Vec::new();
3693                 let mut update_fail_malformed_htlcs = Vec::new();
3694                 let mut require_commitment = false;
3695                 let mut value_to_self_msat_diff: i64 = 0;
3696
3697                 {
3698                         // Take references explicitly so that we can hold multiple references to self.context.
3699                         let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
3700                         let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
3701                         let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
3702
3703                         // We really shouldnt have two passes here, but retain gives a non-mutable ref (Rust bug)
3704                         pending_inbound_htlcs.retain(|htlc| {
3705                                 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
3706                                         log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
3707                                         if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3708                                                 value_to_self_msat_diff += htlc.amount_msat as i64;
3709                                         }
3710                                         *expecting_peer_commitment_signed = true;
3711                                         false
3712                                 } else { true }
3713                         });
3714                         pending_outbound_htlcs.retain(|htlc| {
3715                                 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
3716                                         log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
3717                                         if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
3718                                                 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
3719                                         } else {
3720                                                 finalized_claimed_htlcs.push(htlc.source.clone());
3721                                                 // They fulfilled, so we sent them money
3722                                                 value_to_self_msat_diff -= htlc.amount_msat as i64;
3723                                         }
3724                                         false
3725                                 } else { true }
3726                         });
3727                         for htlc in pending_inbound_htlcs.iter_mut() {
3728                                 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
3729                                         true
3730                                 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
3731                                         true
3732                                 } else { false };
3733                                 if swap {
3734                                         let mut state = InboundHTLCState::Committed;
3735                                         mem::swap(&mut state, &mut htlc.state);
3736
3737                                         if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
3738                                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
3739                                                 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
3740                                                 require_commitment = true;
3741                                         } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
3742                                                 match forward_info {
3743                                                         PendingHTLCStatus::Fail(fail_msg) => {
3744                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
3745                                                                 require_commitment = true;
3746                                                                 match fail_msg {
3747                                                                         HTLCFailureMsg::Relay(msg) => {
3748                                                                                 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
3749                                                                                 update_fail_htlcs.push(msg)
3750                                                                         },
3751                                                                         HTLCFailureMsg::Malformed(msg) => {
3752                                                                                 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
3753                                                                                 update_fail_malformed_htlcs.push(msg)
3754                                                                         },
3755                                                                 }
3756                                                         },
3757                                                         PendingHTLCStatus::Forward(forward_info) => {
3758                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
3759                                                                 to_forward_infos.push((forward_info, htlc.htlc_id));
3760                                                                 htlc.state = InboundHTLCState::Committed;
3761                                                         }
3762                                                 }
3763                                         }
3764                                 }
3765                         }
3766                         for htlc in pending_outbound_htlcs.iter_mut() {
3767                                 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
3768                                         log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
3769                                         htlc.state = OutboundHTLCState::Committed;
3770                                         *expecting_peer_commitment_signed = true;
3771                                 }
3772                                 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
3773                                         log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
3774                                         // Grab the preimage, if it exists, instead of cloning
3775                                         let mut reason = OutboundHTLCOutcome::Success(None);
3776                                         mem::swap(outcome, &mut reason);
3777                                         htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
3778                                         require_commitment = true;
3779                                 }
3780                         }
3781                 }
3782                 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
3783
3784                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
3785                         match update_state {
3786                                 FeeUpdateState::Outbound => {
3787                                         debug_assert!(self.context.is_outbound());
3788                                         log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
3789                                         self.context.feerate_per_kw = feerate;
3790                                         self.context.pending_update_fee = None;
3791                                         self.context.expecting_peer_commitment_signed = true;
3792                                 },
3793                                 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
3794                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
3795                                         debug_assert!(!self.context.is_outbound());
3796                                         log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
3797                                         require_commitment = true;
3798                                         self.context.feerate_per_kw = feerate;
3799                                         self.context.pending_update_fee = None;
3800                                 },
3801                         }
3802                 }
3803
3804                 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
3805                 let release_state_str =
3806                         if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
3807                 macro_rules! return_with_htlcs_to_fail {
3808                         ($htlcs_to_fail: expr) => {
3809                                 if !release_monitor {
3810                                         self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3811                                                 update: monitor_update,
3812                                         });
3813                                         return Ok(($htlcs_to_fail, None));
3814                                 } else {
3815                                         return Ok(($htlcs_to_fail, Some(monitor_update)));
3816                                 }
3817                         }
3818                 }
3819
3820                 if self.context.channel_state.is_monitor_update_in_progress() {
3821                         // We can't actually generate a new commitment transaction (incl by freeing holding
3822                         // cells) while we can't update the monitor, so we just return what we have.
3823                         if require_commitment {
3824                                 self.context.monitor_pending_commitment_signed = true;
3825                                 // When the monitor updating is restored we'll call
3826                                 // get_last_commitment_update_for_send(), which does not update state, but we're
3827                                 // definitely now awaiting a remote revoke before we can step forward any more, so
3828                                 // set it here.
3829                                 let mut additional_update = self.build_commitment_no_status_check(logger);
3830                                 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3831                                 // strictly increasing by one, so decrement it here.
3832                                 self.context.latest_monitor_update_id = monitor_update.update_id;
3833                                 monitor_update.updates.append(&mut additional_update.updates);
3834                         }
3835                         self.context.monitor_pending_forwards.append(&mut to_forward_infos);
3836                         self.context.monitor_pending_failures.append(&mut revoked_htlcs);
3837                         self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
3838                         log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
3839                         return_with_htlcs_to_fail!(Vec::new());
3840                 }
3841
3842                 match self.free_holding_cell_htlcs(fee_estimator, logger) {
3843                         (Some(mut additional_update), htlcs_to_fail) => {
3844                                 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
3845                                 // strictly increasing by one, so decrement it here.
3846                                 self.context.latest_monitor_update_id = monitor_update.update_id;
3847                                 monitor_update.updates.append(&mut additional_update.updates);
3848
3849                                 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
3850                                         &self.context.channel_id(), release_state_str);
3851
3852                                 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3853                                 return_with_htlcs_to_fail!(htlcs_to_fail);
3854                         },
3855                         (None, htlcs_to_fail) => {
3856                                 if require_commitment {
3857                                         let mut additional_update = self.build_commitment_no_status_check(logger);
3858
3859                                         // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
3860                                         // strictly increasing by one, so decrement it here.
3861                                         self.context.latest_monitor_update_id = monitor_update.update_id;
3862                                         monitor_update.updates.append(&mut additional_update.updates);
3863
3864                                         log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
3865                                                 &self.context.channel_id(),
3866                                                 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
3867                                                 release_state_str);
3868
3869                                         self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3870                                         return_with_htlcs_to_fail!(htlcs_to_fail);
3871                                 } else {
3872                                         log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
3873                                                 &self.context.channel_id(), release_state_str);
3874
3875                                         self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
3876                                         return_with_htlcs_to_fail!(htlcs_to_fail);
3877                                 }
3878                         }
3879                 }
3880         }
3881
3882         /// Queues up an outbound update fee by placing it in the holding cell. You should call
3883         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
3884         /// commitment update.
3885         pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
3886                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
3887         where F::Target: FeeEstimator, L::Target: Logger
3888         {
3889                 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
3890                 assert!(msg_opt.is_none(), "We forced holding cell?");
3891         }
3892
3893         /// Adds a pending update to this channel. See the doc for send_htlc for
3894         /// further details on the optionness of the return value.
3895         /// If our balance is too low to cover the cost of the next commitment transaction at the
3896         /// new feerate, the update is cancelled.
3897         ///
3898         /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
3899         /// [`Channel`] if `force_holding_cell` is false.
3900         fn send_update_fee<F: Deref, L: Deref>(
3901                 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
3902                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
3903         ) -> Option<msgs::UpdateFee>
3904         where F::Target: FeeEstimator, L::Target: Logger
3905         {
3906                 if !self.context.is_outbound() {
3907                         panic!("Cannot send fee from inbound channel");
3908                 }
3909                 if !self.context.is_usable() {
3910                         panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
3911                 }
3912                 if !self.context.is_live() {
3913                         panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
3914                 }
3915
3916                 // Before proposing a feerate update, check that we can actually afford the new fee.
3917                 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
3918                 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
3919                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
3920                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
3921                 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
3922                 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
3923                 if holder_balance_msat < buffer_fee_msat  + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
3924                         //TODO: auto-close after a number of failures?
3925                         log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
3926                         return None;
3927                 }
3928
3929                 // Note, we evaluate pending htlc "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
3930                 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
3931                 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
3932                 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3933                 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
3934                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3935                         return None;
3936                 }
3937                 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3938                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
3939                         return None;
3940                 }
3941
3942                 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
3943                         force_holding_cell = true;
3944                 }
3945
3946                 if force_holding_cell {
3947                         self.context.holding_cell_update_fee = Some(feerate_per_kw);
3948                         return None;
3949                 }
3950
3951                 debug_assert!(self.context.pending_update_fee.is_none());
3952                 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
3953
3954                 Some(msgs::UpdateFee {
3955                         channel_id: self.context.channel_id,
3956                         feerate_per_kw,
3957                 })
3958         }
3959
3960         /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
3961         /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
3962         /// resent.
3963         /// No further message handling calls may be made until a channel_reestablish dance has
3964         /// completed.
3965         /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
3966         pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
3967                 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
3968                 if self.context.channel_state.is_pre_funded_state() {
3969                         return Err(())
3970                 }
3971
3972                 if self.context.channel_state.is_peer_disconnected() {
3973                         // While the below code should be idempotent, it's simpler to just return early, as
3974                         // redundant disconnect events can fire, though they should be rare.
3975                         return Ok(());
3976                 }
3977
3978                 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
3979                         self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
3980                 }
3981
3982                 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
3983                 // will be retransmitted.
3984                 self.context.last_sent_closing_fee = None;
3985                 self.context.pending_counterparty_closing_signed = None;
3986                 self.context.closing_fee_limits = None;
3987
3988                 let mut inbound_drop_count = 0;
3989                 self.context.pending_inbound_htlcs.retain(|htlc| {
3990                         match htlc.state {
3991                                 InboundHTLCState::RemoteAnnounced(_) => {
3992                                         // They sent us an update_add_htlc but we never got the commitment_signed.
3993                                         // We'll tell them what commitment_signed we're expecting next and they'll drop
3994                                         // this HTLC accordingly
3995                                         inbound_drop_count += 1;
3996                                         false
3997                                 },
3998                                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
3999                                         // We received a commitment_signed updating this HTLC and (at least hopefully)
4000                                         // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4001                                         // in response to it yet, so don't touch it.
4002                                         true
4003                                 },
4004                                 InboundHTLCState::Committed => true,
4005                                 InboundHTLCState::LocalRemoved(_) => {
4006                                         // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4007                                         // re-transmit if needed) and they may have even sent a revoke_and_ack back
4008                                         // (that we missed). Keep this around for now and if they tell us they missed
4009                                         // the commitment_signed we can re-transmit the update then.
4010                                         true
4011                                 },
4012                         }
4013                 });
4014                 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4015
4016                 if let Some((_, update_state)) = self.context.pending_update_fee {
4017                         if update_state == FeeUpdateState::RemoteAnnounced {
4018                                 debug_assert!(!self.context.is_outbound());
4019                                 self.context.pending_update_fee = None;
4020                         }
4021                 }
4022
4023                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4024                         if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4025                                 // They sent us an update to remove this but haven't yet sent the corresponding
4026                                 // commitment_signed, we need to move it back to Committed and they can re-send
4027                                 // the update upon reconnection.
4028                                 htlc.state = OutboundHTLCState::Committed;
4029                         }
4030                 }
4031
4032                 self.context.sent_message_awaiting_response = None;
4033
4034                 self.context.channel_state.set_peer_disconnected();
4035                 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4036                 Ok(())
4037         }
4038
4039         /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4040         /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4041         /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4042         /// update completes (potentially immediately).
4043         /// The messages which were generated with the monitor update must *not* have been sent to the
4044         /// remote end, and must instead have been dropped. They will be regenerated when
4045         /// [`Self::monitor_updating_restored`] is called.
4046         ///
4047         /// [`ChannelManager`]: super::channelmanager::ChannelManager
4048         /// [`chain::Watch`]: crate::chain::Watch
4049         /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4050         fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4051                 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4052                 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4053                 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4054         ) {
4055                 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4056                 self.context.monitor_pending_commitment_signed |= resend_commitment;
4057                 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4058                 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4059                 self.context.monitor_pending_failures.append(&mut pending_fails);
4060                 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4061                 self.context.channel_state.set_monitor_update_in_progress();
4062         }
4063
4064         /// Indicates that the latest ChannelMonitor update has been committed by the client
4065         /// successfully and we should restore normal operation. Returns messages which should be sent
4066         /// to the remote side.
4067         pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4068                 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4069                 user_config: &UserConfig, best_block_height: u32
4070         ) -> MonitorRestoreUpdates
4071         where
4072                 L::Target: Logger,
4073                 NS::Target: NodeSigner
4074         {
4075                 assert!(self.context.channel_state.is_monitor_update_in_progress());
4076                 self.context.channel_state.clear_monitor_update_in_progress();
4077
4078                 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4079                 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4080                 // first received the funding_signed.
4081                 let mut funding_broadcastable =
4082                         if self.context.is_outbound() &&
4083                                 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4084                                 matches!(self.context.channel_state, ChannelState::ChannelReady(_))
4085                         {
4086                                 self.context.funding_transaction.take()
4087                         } else { None };
4088                 // That said, if the funding transaction is already confirmed (ie we're active with a
4089                 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4090                 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4091                         funding_broadcastable = None;
4092                 }
4093
4094                 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4095                 // (and we assume the user never directly broadcasts the funding transaction and waits for
4096                 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4097                 // * an inbound channel that failed to persist the monitor on funding_created and we got
4098                 //   the funding transaction confirmed before the monitor was persisted, or
4099                 // * a 0-conf channel and intended to send the channel_ready before any broadcast at all.
4100                 let channel_ready = if self.context.monitor_pending_channel_ready {
4101                         assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4102                                 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4103                         self.context.monitor_pending_channel_ready = false;
4104                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4105                         Some(msgs::ChannelReady {
4106                                 channel_id: self.context.channel_id(),
4107                                 next_per_commitment_point,
4108                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4109                         })
4110                 } else { None };
4111
4112                 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4113
4114                 let mut accepted_htlcs = Vec::new();
4115                 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4116                 let mut failed_htlcs = Vec::new();
4117                 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4118                 let mut finalized_claimed_htlcs = Vec::new();
4119                 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4120
4121                 if self.context.channel_state.is_peer_disconnected() {
4122                         self.context.monitor_pending_revoke_and_ack = false;
4123                         self.context.monitor_pending_commitment_signed = false;
4124                         return MonitorRestoreUpdates {
4125                                 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4126                                 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4127                         };
4128                 }
4129
4130                 let raa = if self.context.monitor_pending_revoke_and_ack {
4131                         Some(self.get_last_revoke_and_ack())
4132                 } else { None };
4133                 let commitment_update = if self.context.monitor_pending_commitment_signed {
4134                         self.get_last_commitment_update_for_send(logger).ok()
4135                 } else { None };
4136                 if commitment_update.is_some() {
4137                         self.mark_awaiting_response();
4138                 }
4139
4140                 self.context.monitor_pending_revoke_and_ack = false;
4141                 self.context.monitor_pending_commitment_signed = false;
4142                 let order = self.context.resend_order.clone();
4143                 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4144                         &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4145                         if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4146                         match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4147                 MonitorRestoreUpdates {
4148                         raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4149                 }
4150         }
4151
4152         pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4153                 where F::Target: FeeEstimator, L::Target: Logger
4154         {
4155                 if self.context.is_outbound() {
4156                         return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4157                 }
4158                 if self.context.channel_state.is_peer_disconnected() {
4159                         return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4160                 }
4161                 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4162
4163                 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4164                 self.context.update_time_counter += 1;
4165                 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
4166                 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4167                         let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4168                         let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4169                         let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4170                         let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4171                         let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4172                         if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4173                                 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4174                                         msg.feerate_per_kw, holder_tx_dust_exposure)));
4175                         }
4176                         if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4177                                 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4178                                         msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4179                         }
4180                 }
4181                 Ok(())
4182         }
4183
4184         /// Indicates that the signer may have some signatures for us, so we should retry if we're
4185         /// blocked.
4186         #[allow(unused)]
4187         pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4188                 let commitment_update = if self.context.signer_pending_commitment_update {
4189                         self.get_last_commitment_update_for_send(logger).ok()
4190                 } else { None };
4191                 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4192                         self.context.get_funding_signed_msg(logger).1
4193                 } else { None };
4194                 let channel_ready = if funding_signed.is_some() {
4195                         self.check_get_channel_ready(0)
4196                 } else { None };
4197
4198                 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
4199                         if commitment_update.is_some() { "a" } else { "no" },
4200                         if funding_signed.is_some() { "a" } else { "no" },
4201                         if channel_ready.is_some() { "a" } else { "no" });
4202
4203                 SignerResumeUpdates {
4204                         commitment_update,
4205                         funding_signed,
4206                         channel_ready,
4207                 }
4208         }
4209
4210         fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
4211                 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4212                 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
4213                 msgs::RevokeAndACK {
4214                         channel_id: self.context.channel_id,
4215                         per_commitment_secret,
4216                         next_per_commitment_point,
4217                         #[cfg(taproot)]
4218                         next_local_nonce: None,
4219                 }
4220         }
4221
4222         /// Gets the last commitment update for immediate sending to our peer.
4223         fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
4224                 let mut update_add_htlcs = Vec::new();
4225                 let mut update_fulfill_htlcs = Vec::new();
4226                 let mut update_fail_htlcs = Vec::new();
4227                 let mut update_fail_malformed_htlcs = Vec::new();
4228
4229                 for htlc in self.context.pending_outbound_htlcs.iter() {
4230                         if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
4231                                 update_add_htlcs.push(msgs::UpdateAddHTLC {
4232                                         channel_id: self.context.channel_id(),
4233                                         htlc_id: htlc.htlc_id,
4234                                         amount_msat: htlc.amount_msat,
4235                                         payment_hash: htlc.payment_hash,
4236                                         cltv_expiry: htlc.cltv_expiry,
4237                                         onion_routing_packet: (**onion_packet).clone(),
4238                                         skimmed_fee_msat: htlc.skimmed_fee_msat,
4239                                         blinding_point: htlc.blinding_point,
4240                                 });
4241                         }
4242                 }
4243
4244                 for htlc in self.context.pending_inbound_htlcs.iter() {
4245                         if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4246                                 match reason {
4247                                         &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
4248                                                 update_fail_htlcs.push(msgs::UpdateFailHTLC {
4249                                                         channel_id: self.context.channel_id(),
4250                                                         htlc_id: htlc.htlc_id,
4251                                                         reason: err_packet.clone()
4252                                                 });
4253                                         },
4254                                         &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
4255                                                 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
4256                                                         channel_id: self.context.channel_id(),
4257                                                         htlc_id: htlc.htlc_id,
4258                                                         sha256_of_onion: sha256_of_onion.clone(),
4259                                                         failure_code: failure_code.clone(),
4260                                                 });
4261                                         },
4262                                         &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
4263                                                 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
4264                                                         channel_id: self.context.channel_id(),
4265                                                         htlc_id: htlc.htlc_id,
4266                                                         payment_preimage: payment_preimage.clone(),
4267                                                 });
4268                                         },
4269                                 }
4270                         }
4271                 }
4272
4273                 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
4274                         Some(msgs::UpdateFee {
4275                                 channel_id: self.context.channel_id(),
4276                                 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
4277                         })
4278                 } else { None };
4279
4280                 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
4281                                 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
4282                                 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
4283                 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
4284                         if self.context.signer_pending_commitment_update {
4285                                 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
4286                                 self.context.signer_pending_commitment_update = false;
4287                         }
4288                         update
4289                 } else {
4290                         if !self.context.signer_pending_commitment_update {
4291                                 log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
4292                                 self.context.signer_pending_commitment_update = true;
4293                         }
4294                         return Err(());
4295                 };
4296                 Ok(msgs::CommitmentUpdate {
4297                         update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
4298                         commitment_signed,
4299                 })
4300         }
4301
4302         /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
4303         pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
4304                 if self.context.channel_state.is_local_shutdown_sent() {
4305                         assert!(self.context.shutdown_scriptpubkey.is_some());
4306                         Some(msgs::Shutdown {
4307                                 channel_id: self.context.channel_id,
4308                                 scriptpubkey: self.get_closing_scriptpubkey(),
4309                         })
4310                 } else { None }
4311         }
4312
4313         /// May panic if some calls other than message-handling calls (which will all Err immediately)
4314         /// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
4315         ///
4316         /// Some links printed in log lines are included here to check them during build (when run with
4317         /// `cargo doc --document-private-items`):
4318         /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
4319         /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
4320         pub fn channel_reestablish<L: Deref, NS: Deref>(
4321                 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
4322                 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
4323         ) -> Result<ReestablishResponses, ChannelError>
4324         where
4325                 L::Target: Logger,
4326                 NS::Target: NodeSigner
4327         {
4328                 if !self.context.channel_state.is_peer_disconnected() {
4329                         // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
4330                         // almost certainly indicates we are going to end up out-of-sync in some way, so we
4331                         // just close here instead of trying to recover.
4332                         return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
4333                 }
4334
4335                 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
4336                         msg.next_local_commitment_number == 0 {
4337                         return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
4338                 }
4339
4340                 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
4341                 if msg.next_remote_commitment_number > 0 {
4342                         let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
4343                         let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
4344                                 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
4345                         if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
4346                                 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
4347                         }
4348                         if msg.next_remote_commitment_number > our_commitment_transaction {
4349                                 macro_rules! log_and_panic {
4350                                         ($err_msg: expr) => {
4351                                                 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4352                                                 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
4353                                         }
4354                                 }
4355                                 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
4356                                         This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
4357                                         More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
4358                                         If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
4359                                         ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
4360                                         ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
4361                                         Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
4362                                         See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
4363                         }
4364                 }
4365
4366                 // Before we change the state of the channel, we check if the peer is sending a very old
4367                 // commitment transaction number, if yes we send a warning message.
4368                 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
4369                         return Err(ChannelError::Warn(format!(
4370                                 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
4371                                 msg.next_remote_commitment_number,
4372                                 our_commitment_transaction
4373                         )));
4374                 }
4375
4376                 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
4377                 // remaining cases either succeed or ErrorMessage-fail).
4378                 self.context.channel_state.clear_peer_disconnected();
4379                 self.context.sent_message_awaiting_response = None;
4380
4381                 let shutdown_msg = self.get_outbound_shutdown();
4382
4383                 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
4384
4385                 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
4386                         // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
4387                         if !self.context.channel_state.is_our_channel_ready() ||
4388                                         self.context.channel_state.is_monitor_update_in_progress() {
4389                                 if msg.next_remote_commitment_number != 0 {
4390                                         return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
4391                                 }
4392                                 // Short circuit the whole handler as there is nothing we can resend them
4393                                 return Ok(ReestablishResponses {
4394                                         channel_ready: None,
4395                                         raa: None, commitment_update: None,
4396                                         order: RAACommitmentOrder::CommitmentFirst,
4397                                         shutdown_msg, announcement_sigs,
4398                                 });
4399                         }
4400
4401                         // We have OurChannelReady set!
4402                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4403                         return Ok(ReestablishResponses {
4404                                 channel_ready: Some(msgs::ChannelReady {
4405                                         channel_id: self.context.channel_id(),
4406                                         next_per_commitment_point,
4407                                         short_channel_id_alias: Some(self.context.outbound_scid_alias),
4408                                 }),
4409                                 raa: None, commitment_update: None,
4410                                 order: RAACommitmentOrder::CommitmentFirst,
4411                                 shutdown_msg, announcement_sigs,
4412                         });
4413                 }
4414
4415                 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
4416                         // Remote isn't waiting on any RevokeAndACK from us!
4417                         // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
4418                         None
4419                 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
4420                         if self.context.channel_state.is_monitor_update_in_progress() {
4421                                 self.context.monitor_pending_revoke_and_ack = true;
4422                                 None
4423                         } else {
4424                                 Some(self.get_last_revoke_and_ack())
4425                         }
4426                 } else {
4427                         debug_assert!(false, "All values should have been handled in the four cases above");
4428                         return Err(ChannelError::Close(format!(
4429                                 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
4430                                 msg.next_remote_commitment_number,
4431                                 our_commitment_transaction
4432                         )));
4433                 };
4434
4435                 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
4436                 // revoke_and_ack, not on sending commitment_signed, so we add one if have
4437                 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
4438                 // the corresponding revoke_and_ack back yet.
4439                 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
4440                 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
4441                         self.mark_awaiting_response();
4442                 }
4443                 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
4444
4445                 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
4446                         // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
4447                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4448                         Some(msgs::ChannelReady {
4449                                 channel_id: self.context.channel_id(),
4450                                 next_per_commitment_point,
4451                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4452                         })
4453                 } else { None };
4454
4455                 if msg.next_local_commitment_number == next_counterparty_commitment_number {
4456                         if required_revoke.is_some() {
4457                                 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
4458                         } else {
4459                                 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
4460                         }
4461
4462                         Ok(ReestablishResponses {
4463                                 channel_ready, shutdown_msg, announcement_sigs,
4464                                 raa: required_revoke,
4465                                 commitment_update: None,
4466                                 order: self.context.resend_order.clone(),
4467                         })
4468                 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
4469                         if required_revoke.is_some() {
4470                                 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
4471                         } else {
4472                                 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
4473                         }
4474
4475                         if self.context.channel_state.is_monitor_update_in_progress() {
4476                                 self.context.monitor_pending_commitment_signed = true;
4477                                 Ok(ReestablishResponses {
4478                                         channel_ready, shutdown_msg, announcement_sigs,
4479                                         commitment_update: None, raa: None,
4480                                         order: self.context.resend_order.clone(),
4481                                 })
4482                         } else {
4483                                 Ok(ReestablishResponses {
4484                                         channel_ready, shutdown_msg, announcement_sigs,
4485                                         raa: required_revoke,
4486                                         commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
4487                                         order: self.context.resend_order.clone(),
4488                                 })
4489                         }
4490                 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
4491                         Err(ChannelError::Close(format!(
4492                                 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
4493                                 msg.next_local_commitment_number,
4494                                 next_counterparty_commitment_number,
4495                         )))
4496                 } else {
4497                         Err(ChannelError::Close(format!(
4498                                 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
4499                                 msg.next_local_commitment_number,
4500                                 next_counterparty_commitment_number,
4501                         )))
4502                 }
4503         }
4504
4505         /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
4506         /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
4507         /// at which point they will be recalculated.
4508         fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
4509                 -> (u64, u64)
4510                 where F::Target: FeeEstimator
4511         {
4512                 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
4513
4514                 // Propose a range from our current Background feerate to our Normal feerate plus our
4515                 // force_close_avoidance_max_fee_satoshis.
4516                 // If we fail to come to consensus, we'll have to force-close.
4517                 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
4518                 // Use NonAnchorChannelFee because this should be an estimate for a channel close
4519                 // that we don't expect to need fee bumping
4520                 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
4521                 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
4522
4523                 // The spec requires that (when the channel does not have anchors) we only send absolute
4524                 // channel fees no greater than the absolute channel fee on the current commitment
4525                 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
4526                 // very good reason to apply such a limit in any case. We don't bother doing so, risking
4527                 // some force-closure by old nodes, but we wanted to close the channel anyway.
4528
4529                 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
4530                         let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
4531                         proposed_feerate = cmp::max(proposed_feerate, min_feerate);
4532                         proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
4533                 }
4534
4535                 // Note that technically we could end up with a lower minimum fee if one sides' balance is
4536                 // below our dust limit, causing the output to disappear. We don't bother handling this
4537                 // case, however, as this should only happen if a channel is closed before any (material)
4538                 // payments have been made on it. This may cause slight fee overpayment and/or failure to
4539                 // come to consensus with our counterparty on appropriate fees, however it should be a
4540                 // relatively rare case. We can revisit this later, though note that in order to determine
4541                 // if the funders' output is dust we have to know the absolute fee we're going to use.
4542                 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
4543                 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
4544                 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
4545                                 // We always add force_close_avoidance_max_fee_satoshis to our normal
4546                                 // feerate-calculated fee, but allow the max to be overridden if we're using a
4547                                 // target feerate-calculated fee.
4548                                 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
4549                                         proposed_max_feerate as u64 * tx_weight / 1000)
4550                         } else {
4551                                 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
4552                         };
4553
4554                 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
4555                 self.context.closing_fee_limits.clone().unwrap()
4556         }
4557
4558         /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
4559         /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
4560         /// this point if we're the funder we should send the initial closing_signed, and in any case
4561         /// shutdown should complete within a reasonable timeframe.
4562         fn closing_negotiation_ready(&self) -> bool {
4563                 self.context.closing_negotiation_ready()
4564         }
4565
4566         /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
4567         /// an Err if no progress is being made and the channel should be force-closed instead.
4568         /// Should be called on a one-minute timer.
4569         pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
4570                 if self.closing_negotiation_ready() {
4571                         if self.context.closing_signed_in_flight {
4572                                 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
4573                         } else {
4574                                 self.context.closing_signed_in_flight = true;
4575                         }
4576                 }
4577                 Ok(())
4578         }
4579
4580         pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
4581                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4582                 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4583                 where F::Target: FeeEstimator, L::Target: Logger
4584         {
4585                 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
4586                 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
4587                 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
4588                 // that closing_negotiation_ready checks this case (as well as a few others).
4589                 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
4590                         return Ok((None, None, None));
4591                 }
4592
4593                 if !self.context.is_outbound() {
4594                         if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
4595                                 return self.closing_signed(fee_estimator, &msg);
4596                         }
4597                         return Ok((None, None, None));
4598                 }
4599
4600                 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
4601                 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
4602                 if self.context.expecting_peer_commitment_signed {
4603                         return Ok((None, None, None));
4604                 }
4605
4606                 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4607
4608                 assert!(self.context.shutdown_scriptpubkey.is_some());
4609                 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
4610                 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
4611                         our_min_fee, our_max_fee, total_fee_satoshis);
4612
4613                 match &self.context.holder_signer {
4614                         ChannelSignerType::Ecdsa(ecdsa) => {
4615                                 let sig = ecdsa
4616                                         .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4617                                         .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
4618
4619                                 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
4620                                 Ok((Some(msgs::ClosingSigned {
4621                                         channel_id: self.context.channel_id,
4622                                         fee_satoshis: total_fee_satoshis,
4623                                         signature: sig,
4624                                         fee_range: Some(msgs::ClosingSignedFeeRange {
4625                                                 min_fee_satoshis: our_min_fee,
4626                                                 max_fee_satoshis: our_max_fee,
4627                                         }),
4628                                 }), None, None))
4629                         },
4630                         // TODO (taproot|arik)
4631                         #[cfg(taproot)]
4632                         _ => todo!()
4633                 }
4634         }
4635
4636         // Marks a channel as waiting for a response from the counterparty. If it's not received
4637         // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
4638         // a reconnection.
4639         fn mark_awaiting_response(&mut self) {
4640                 self.context.sent_message_awaiting_response = Some(0);
4641         }
4642
4643         /// Determines whether we should disconnect the counterparty due to not receiving a response
4644         /// within our expected timeframe.
4645         ///
4646         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
4647         pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
4648                 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
4649                         ticks_elapsed
4650                 } else {
4651                         // Don't disconnect when we're not waiting on a response.
4652                         return false;
4653                 };
4654                 *ticks_elapsed += 1;
4655                 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
4656         }
4657
4658         pub fn shutdown(
4659                 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4660         ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4661         {
4662                 if self.context.channel_state.is_peer_disconnected() {
4663                         return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
4664                 }
4665                 if self.context.channel_state.is_pre_funded_state() {
4666                         // Spec says we should fail the connection, not the channel, but that's nonsense, there
4667                         // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
4668                         // can do that via error message without getting a connection fail anyway...
4669                         return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
4670                 }
4671                 for htlc in self.context.pending_inbound_htlcs.iter() {
4672                         if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
4673                                 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
4674                         }
4675                 }
4676                 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4677
4678                 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
4679                         return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
4680                 }
4681
4682                 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
4683                         if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
4684                                 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
4685                         }
4686                 } else {
4687                         self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
4688                 }
4689
4690                 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
4691                 // immediately after the commitment dance, but we can send a Shutdown because we won't send
4692                 // any further commitment updates after we set LocalShutdownSent.
4693                 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
4694
4695                 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
4696                         Some(_) => false,
4697                         None => {
4698                                 assert!(send_shutdown);
4699                                 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
4700                                         Ok(scriptpubkey) => scriptpubkey,
4701                                         Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
4702                                 };
4703                                 if !shutdown_scriptpubkey.is_compatible(their_features) {
4704                                         return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
4705                                 }
4706                                 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
4707                                 true
4708                         },
4709                 };
4710
4711                 // From here on out, we may not fail!
4712
4713                 self.context.channel_state.set_remote_shutdown_sent();
4714                 self.context.update_time_counter += 1;
4715
4716                 let monitor_update = if update_shutdown_script {
4717                         self.context.latest_monitor_update_id += 1;
4718                         let monitor_update = ChannelMonitorUpdate {
4719                                 update_id: self.context.latest_monitor_update_id,
4720                                 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
4721                                         scriptpubkey: self.get_closing_scriptpubkey(),
4722                                 }],
4723                         };
4724                         self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4725                         self.push_ret_blockable_mon_update(monitor_update)
4726                 } else { None };
4727                 let shutdown = if send_shutdown {
4728                         Some(msgs::Shutdown {
4729                                 channel_id: self.context.channel_id,
4730                                 scriptpubkey: self.get_closing_scriptpubkey(),
4731                         })
4732                 } else { None };
4733
4734                 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
4735                 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
4736                 // cell HTLCs and return them to fail the payment.
4737                 self.context.holding_cell_update_fee = None;
4738                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
4739                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
4740                         match htlc_update {
4741                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
4742                                         dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
4743                                         false
4744                                 },
4745                                 _ => true
4746                         }
4747                 });
4748
4749                 self.context.channel_state.set_local_shutdown_sent();
4750                 self.context.update_time_counter += 1;
4751
4752                 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
4753         }
4754
4755         fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
4756                 let mut tx = closing_tx.trust().built_transaction().clone();
4757
4758                 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
4759
4760                 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
4761                 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
4762                 let mut holder_sig = sig.serialize_der().to_vec();
4763                 holder_sig.push(EcdsaSighashType::All as u8);
4764                 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
4765                 cp_sig.push(EcdsaSighashType::All as u8);
4766                 if funding_key[..] < counterparty_funding_key[..] {
4767                         tx.input[0].witness.push(holder_sig);
4768                         tx.input[0].witness.push(cp_sig);
4769                 } else {
4770                         tx.input[0].witness.push(cp_sig);
4771                         tx.input[0].witness.push(holder_sig);
4772                 }
4773
4774                 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
4775                 tx
4776         }
4777
4778         pub fn closing_signed<F: Deref>(
4779                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
4780                 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
4781                 where F::Target: FeeEstimator
4782         {
4783                 if !self.context.channel_state.is_both_sides_shutdown() {
4784                         return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
4785                 }
4786                 if self.context.channel_state.is_peer_disconnected() {
4787                         return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
4788                 }
4789                 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
4790                         return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
4791                 }
4792                 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
4793                         return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
4794                 }
4795
4796                 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
4797                         return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
4798                 }
4799
4800                 if self.context.channel_state.is_monitor_update_in_progress() {
4801                         self.context.pending_counterparty_closing_signed = Some(msg.clone());
4802                         return Ok((None, None, None));
4803                 }
4804
4805                 let funding_redeemscript = self.context.get_funding_redeemscript();
4806                 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
4807                 if used_total_fee != msg.fee_satoshis {
4808                         return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
4809                 }
4810                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4811
4812                 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
4813                         Ok(_) => {},
4814                         Err(_e) => {
4815                                 // The remote end may have decided to revoke their output due to inconsistent dust
4816                                 // limits, so check for that case by re-checking the signature here.
4817                                 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
4818                                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
4819                                 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
4820                         },
4821                 };
4822
4823                 for outp in closing_tx.trust().built_transaction().output.iter() {
4824                         if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
4825                                 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
4826                         }
4827                 }
4828
4829                 assert!(self.context.shutdown_scriptpubkey.is_some());
4830                 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
4831                         if last_fee == msg.fee_satoshis {
4832                                 let shutdown_result = ShutdownResult {
4833                                         monitor_update: None,
4834                                         dropped_outbound_htlcs: Vec::new(),
4835                                         unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4836                                         channel_id: self.context.channel_id,
4837                                         counterparty_node_id: self.context.counterparty_node_id,
4838                                 };
4839                                 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
4840                                 self.context.channel_state = ChannelState::ShutdownComplete;
4841                                 self.context.update_time_counter += 1;
4842                                 return Ok((None, Some(tx), Some(shutdown_result)));
4843                         }
4844                 }
4845
4846                 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
4847
4848                 macro_rules! propose_fee {
4849                         ($new_fee: expr) => {
4850                                 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
4851                                         (closing_tx, $new_fee)
4852                                 } else {
4853                                         self.build_closing_transaction($new_fee, false)
4854                                 };
4855
4856                                 return match &self.context.holder_signer {
4857                                         ChannelSignerType::Ecdsa(ecdsa) => {
4858                                                 let sig = ecdsa
4859                                                         .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
4860                                                         .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
4861                                                 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
4862                                                         let shutdown_result = ShutdownResult {
4863                                                                 monitor_update: None,
4864                                                                 dropped_outbound_htlcs: Vec::new(),
4865                                                                 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
4866                                                                 channel_id: self.context.channel_id,
4867                                                                 counterparty_node_id: self.context.counterparty_node_id,
4868                                                         };
4869                                                         self.context.channel_state = ChannelState::ShutdownComplete;
4870                                                         self.context.update_time_counter += 1;
4871                                                         let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
4872                                                         (Some(tx), Some(shutdown_result))
4873                                                 } else {
4874                                                         (None, None)
4875                                                 };
4876
4877                                                 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
4878                                                 Ok((Some(msgs::ClosingSigned {
4879                                                         channel_id: self.context.channel_id,
4880                                                         fee_satoshis: used_fee,
4881                                                         signature: sig,
4882                                                         fee_range: Some(msgs::ClosingSignedFeeRange {
4883                                                                 min_fee_satoshis: our_min_fee,
4884                                                                 max_fee_satoshis: our_max_fee,
4885                                                         }),
4886                                                 }), signed_tx, shutdown_result))
4887                                         },
4888                                         // TODO (taproot|arik)
4889                                         #[cfg(taproot)]
4890                                         _ => todo!()
4891                                 }
4892                         }
4893                 }
4894
4895                 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
4896                         if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
4897                                 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
4898                         }
4899                         if max_fee_satoshis < our_min_fee {
4900                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
4901                         }
4902                         if min_fee_satoshis > our_max_fee {
4903                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
4904                         }
4905
4906                         if !self.context.is_outbound() {
4907                                 // They have to pay, so pick the highest fee in the overlapping range.
4908                                 // We should never set an upper bound aside from their full balance
4909                                 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
4910                                 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
4911                         } else {
4912                                 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
4913                                         return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
4914                                                 msg.fee_satoshis, our_min_fee, our_max_fee)));
4915                                 }
4916                                 // The proposed fee is in our acceptable range, accept it and broadcast!
4917                                 propose_fee!(msg.fee_satoshis);
4918                         }
4919                 } else {
4920                         // Old fee style negotiation. We don't bother to enforce whether they are complying
4921                         // with the "making progress" requirements, we just comply and hope for the best.
4922                         if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
4923                                 if msg.fee_satoshis > last_fee {
4924                                         if msg.fee_satoshis < our_max_fee {
4925                                                 propose_fee!(msg.fee_satoshis);
4926                                         } else if last_fee < our_max_fee {
4927                                                 propose_fee!(our_max_fee);
4928                                         } else {
4929                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
4930                                         }
4931                                 } else {
4932                                         if msg.fee_satoshis > our_min_fee {
4933                                                 propose_fee!(msg.fee_satoshis);
4934                                         } else if last_fee > our_min_fee {
4935                                                 propose_fee!(our_min_fee);
4936                                         } else {
4937                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
4938                                         }
4939                                 }
4940                         } else {
4941                                 if msg.fee_satoshis < our_min_fee {
4942                                         propose_fee!(our_min_fee);
4943                                 } else if msg.fee_satoshis > our_max_fee {
4944                                         propose_fee!(our_max_fee);
4945                                 } else {
4946                                         propose_fee!(msg.fee_satoshis);
4947                                 }
4948                         }
4949                 }
4950         }
4951
4952         fn internal_htlc_satisfies_config(
4953                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
4954         ) -> Result<(), (&'static str, u16)> {
4955                 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
4956                         .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
4957                 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
4958                         (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
4959                         return Err((
4960                                 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
4961                                 0x1000 | 12, // fee_insufficient
4962                         ));
4963                 }
4964                 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
4965                         return Err((
4966                                 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
4967                                 0x1000 | 13, // incorrect_cltv_expiry
4968                         ));
4969                 }
4970                 Ok(())
4971         }
4972
4973         /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
4974         /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
4975         /// unsuccessful, falls back to the previous one if one exists.
4976         pub fn htlc_satisfies_config(
4977                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
4978         ) -> Result<(), (&'static str, u16)> {
4979                 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
4980                         .or_else(|err| {
4981                                 if let Some(prev_config) = self.context.prev_config() {
4982                                         self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
4983                                 } else {
4984                                         Err(err)
4985                                 }
4986                         })
4987         }
4988
4989         pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
4990                 self.context.cur_holder_commitment_transaction_number + 1
4991         }
4992
4993         pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
4994                 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
4995         }
4996
4997         pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
4998                 self.context.cur_counterparty_commitment_transaction_number + 2
4999         }
5000
5001         #[cfg(test)]
5002         pub fn get_signer(&self) -> &ChannelSignerType<SP> {
5003                 &self.context.holder_signer
5004         }
5005
5006         #[cfg(test)]
5007         pub fn get_value_stat(&self) -> ChannelValueStat {
5008                 ChannelValueStat {
5009                         value_to_self_msat: self.context.value_to_self_msat,
5010                         channel_value_msat: self.context.channel_value_satoshis * 1000,
5011                         channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5012                         pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5013                         pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5014                         holding_cell_outbound_amount_msat: {
5015                                 let mut res = 0;
5016                                 for h in self.context.holding_cell_htlc_updates.iter() {
5017                                         match h {
5018                                                 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5019                                                         res += amount_msat;
5020                                                 }
5021                                                 _ => {}
5022                                         }
5023                                 }
5024                                 res
5025                         },
5026                         counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5027                         counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5028                 }
5029         }
5030
5031         /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5032         /// Allowed in any state (including after shutdown)
5033         pub fn is_awaiting_monitor_update(&self) -> bool {
5034                 self.context.channel_state.is_monitor_update_in_progress()
5035         }
5036
5037         /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5038         pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5039                 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
5040                 self.context.blocked_monitor_updates[0].update.update_id - 1
5041         }
5042
5043         /// Returns the next blocked monitor update, if one exists, and a bool which indicates a
5044         /// further blocked monitor update exists after the next.
5045         pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5046                 if self.context.blocked_monitor_updates.is_empty() { return None; }
5047                 Some((self.context.blocked_monitor_updates.remove(0).update,
5048                         !self.context.blocked_monitor_updates.is_empty()))
5049         }
5050
5051         /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5052         /// immediately given to the user for persisting or `None` if it should be held as blocked.
5053         fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5054         -> Option<ChannelMonitorUpdate> {
5055                 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5056                 if !release_monitor {
5057                         self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5058                                 update,
5059                         });
5060                         None
5061                 } else {
5062                         Some(update)
5063                 }
5064         }
5065
5066         pub fn blocked_monitor_updates_pending(&self) -> usize {
5067                 self.context.blocked_monitor_updates.len()
5068         }
5069
5070         /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5071         /// If the channel is outbound, this implies we have not yet broadcasted the funding
5072         /// transaction. If the channel is inbound, this implies simply that the channel has not
5073         /// advanced state.
5074         pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5075                 if !self.is_awaiting_monitor_update() { return false; }
5076                 if matches!(
5077                         self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5078                         if (flags & !(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH)).is_empty()
5079                 ) {
5080                         // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5081                         // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5082                         debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5083                         return true;
5084                 }
5085                 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5086                         self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5087                         // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5088                         // waiting for the initial monitor persistence. Thus, we check if our commitment
5089                         // transaction numbers have both been iterated only exactly once (for the
5090                         // funding_signed), and we're awaiting monitor update.
5091                         //
5092                         // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5093                         // only way to get an awaiting-monitor-update state during initial funding is if the
5094                         // initial monitor persistence is still pending).
5095                         //
5096                         // Because deciding we're awaiting initial broadcast spuriously could result in
5097                         // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5098                         // we hard-assert here, even in production builds.
5099                         if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5100                         assert!(self.context.monitor_pending_channel_ready);
5101                         assert_eq!(self.context.latest_monitor_update_id, 0);
5102                         return true;
5103                 }
5104                 false
5105         }
5106
5107         /// Returns true if our channel_ready has been sent
5108         pub fn is_our_channel_ready(&self) -> bool {
5109                 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5110                         matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5111         }
5112
5113         /// Returns true if our peer has either initiated or agreed to shut down the channel.
5114         pub fn received_shutdown(&self) -> bool {
5115                 self.context.channel_state.is_remote_shutdown_sent()
5116         }
5117
5118         /// Returns true if we either initiated or agreed to shut down the channel.
5119         pub fn sent_shutdown(&self) -> bool {
5120                 self.context.channel_state.is_local_shutdown_sent()
5121         }
5122
5123         /// Returns true if this channel is fully shut down. True here implies that no further actions
5124         /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5125         /// will be handled appropriately by the chain monitor.
5126         pub fn is_shutdown(&self) -> bool {
5127                 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
5128         }
5129
5130         pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5131                 self.context.channel_update_status
5132         }
5133
5134         pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5135                 self.context.update_time_counter += 1;
5136                 self.context.channel_update_status = status;
5137         }
5138
5139         fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5140                 // Called:
5141                 //  * always when a new block/transactions are confirmed with the new height
5142                 //  * when funding is signed with a height of 0
5143                 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5144                         return None;
5145                 }
5146
5147                 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5148                 if funding_tx_confirmations <= 0 {
5149                         self.context.funding_tx_confirmation_height = 0;
5150                 }
5151
5152                 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5153                         return None;
5154                 }
5155
5156                 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5157                 // channel_ready yet.
5158                 if self.context.signer_pending_funding {
5159                         return None;
5160                 }
5161
5162                 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5163                 // channel_ready until the entire batch is ready.
5164                 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if (f & !FundedStateFlags::ALL).is_empty()) {
5165                         self.context.channel_state.set_our_channel_ready();
5166                         true
5167                 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5168                         self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5169                         self.context.update_time_counter += 1;
5170                         true
5171                 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f & !FundedStateFlags::ALL == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
5172                         // We got a reorg but not enough to trigger a force close, just ignore.
5173                         false
5174                 } else {
5175                         if self.context.funding_tx_confirmation_height != 0 &&
5176                                 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
5177                         {
5178                                 // We should never see a funding transaction on-chain until we've received
5179                                 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
5180                                 // an inbound channel - before that we have no known funding TXID). The fuzzer,
5181                                 // however, may do this and we shouldn't treat it as a bug.
5182                                 #[cfg(not(fuzzing))]
5183                                 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
5184                                         Do NOT broadcast a funding transaction manually - let LDK do it for you!",
5185                                         self.context.channel_state.to_u32());
5186                         }
5187                         // We got a reorg but not enough to trigger a force close, just ignore.
5188                         false
5189                 };
5190
5191                 if need_commitment_update {
5192                         if !self.context.channel_state.is_monitor_update_in_progress() {
5193                                 if !self.context.channel_state.is_peer_disconnected() {
5194                                         let next_per_commitment_point =
5195                                                 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
5196                                         return Some(msgs::ChannelReady {
5197                                                 channel_id: self.context.channel_id,
5198                                                 next_per_commitment_point,
5199                                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5200                                         });
5201                                 }
5202                         } else {
5203                                 self.context.monitor_pending_channel_ready = true;
5204                         }
5205                 }
5206                 None
5207         }
5208
5209         /// When a transaction is confirmed, we check whether it is or spends the funding transaction
5210         /// In the first case, we store the confirmation height and calculating the short channel id.
5211         /// In the second, we simply return an Err indicating we need to be force-closed now.
5212         pub fn transactions_confirmed<NS: Deref, L: Deref>(
5213                 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
5214                 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
5215         ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5216         where
5217                 NS::Target: NodeSigner,
5218                 L::Target: Logger
5219         {
5220                 let mut msgs = (None, None);
5221                 if let Some(funding_txo) = self.context.get_funding_txo() {
5222                         for &(index_in_block, tx) in txdata.iter() {
5223                                 // Check if the transaction is the expected funding transaction, and if it is,
5224                                 // check that it pays the right amount to the right script.
5225                                 if self.context.funding_tx_confirmation_height == 0 {
5226                                         if tx.txid() == funding_txo.txid {
5227                                                 let txo_idx = funding_txo.index as usize;
5228                                                 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
5229                                                                 tx.output[txo_idx].value != self.context.channel_value_satoshis {
5230                                                         if self.context.is_outbound() {
5231                                                                 // If we generated the funding transaction and it doesn't match what it
5232                                                                 // should, the client is really broken and we should just panic and
5233                                                                 // tell them off. That said, because hash collisions happen with high
5234                                                                 // probability in fuzzing mode, if we're fuzzing we just close the
5235                                                                 // channel and move on.
5236                                                                 #[cfg(not(fuzzing))]
5237                                                                 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5238                                                         }
5239                                                         self.context.update_time_counter += 1;
5240                                                         let err_reason = "funding tx had wrong script/value or output index";
5241                                                         return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
5242                                                 } else {
5243                                                         if self.context.is_outbound() {
5244                                                                 if !tx.is_coin_base() {
5245                                                                         for input in tx.input.iter() {
5246                                                                                 if input.witness.is_empty() {
5247                                                                                         // We generated a malleable funding transaction, implying we've
5248                                                                                         // just exposed ourselves to funds loss to our counterparty.
5249                                                                                         #[cfg(not(fuzzing))]
5250                                                                                         panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
5251                                                                                 }
5252                                                                         }
5253                                                                 }
5254                                                         }
5255                                                         self.context.funding_tx_confirmation_height = height;
5256                                                         self.context.funding_tx_confirmed_in = Some(*block_hash);
5257                                                         self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
5258                                                                 Ok(scid) => Some(scid),
5259                                                                 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
5260                                                         }
5261                                                 }
5262                                                 // If this is a coinbase transaction and not a 0-conf channel
5263                                                 // we should update our min_depth to 100 to handle coinbase maturity
5264                                                 if tx.is_coin_base() &&
5265                                                         self.context.minimum_depth.unwrap_or(0) > 0 &&
5266                                                         self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
5267                                                         self.context.minimum_depth = Some(COINBASE_MATURITY);
5268                                                 }
5269                                         }
5270                                         // If we allow 1-conf funding, we may need to check for channel_ready here and
5271                                         // send it immediately instead of waiting for a best_block_updated call (which
5272                                         // may have already happened for this block).
5273                                         if let Some(channel_ready) = self.check_get_channel_ready(height) {
5274                                                 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5275                                                 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
5276                                                 msgs = (Some(channel_ready), announcement_sigs);
5277                                         }
5278                                 }
5279                                 for inp in tx.input.iter() {
5280                                         if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
5281                                                 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
5282                                                 return Err(ClosureReason::CommitmentTxConfirmed);
5283                                         }
5284                                 }
5285                         }
5286                 }
5287                 Ok(msgs)
5288         }
5289
5290         /// When a new block is connected, we check the height of the block against outbound holding
5291         /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
5292         /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
5293         /// handled by the ChannelMonitor.
5294         ///
5295         /// If we return Err, the channel may have been closed, at which point the standard
5296         /// requirements apply - no calls may be made except those explicitly stated to be allowed
5297         /// post-shutdown.
5298         ///
5299         /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
5300         /// back.
5301         pub fn best_block_updated<NS: Deref, L: Deref>(
5302                 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
5303                 node_signer: &NS, user_config: &UserConfig, logger: &L
5304         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5305         where
5306                 NS::Target: NodeSigner,
5307                 L::Target: Logger
5308         {
5309                 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
5310         }
5311
5312         fn do_best_block_updated<NS: Deref, L: Deref>(
5313                 &mut self, height: u32, highest_header_time: u32,
5314                 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
5315         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
5316         where
5317                 NS::Target: NodeSigner,
5318                 L::Target: Logger
5319         {
5320                 let mut timed_out_htlcs = Vec::new();
5321                 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
5322                 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
5323                 // ~now.
5324                 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
5325                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5326                         match htlc_update {
5327                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
5328                                         if *cltv_expiry <= unforwarded_htlc_cltv_limit {
5329                                                 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
5330                                                 false
5331                                         } else { true }
5332                                 },
5333                                 _ => true
5334                         }
5335                 });
5336
5337                 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
5338
5339                 if let Some(channel_ready) = self.check_get_channel_ready(height) {
5340                         let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5341                                 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5342                         } else { None };
5343                         log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
5344                         return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
5345                 }
5346
5347                 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5348                         self.context.channel_state.is_our_channel_ready() {
5349                         let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5350                         if self.context.funding_tx_confirmation_height == 0 {
5351                                 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
5352                                 // zero if it has been reorged out, however in either case, our state flags
5353                                 // indicate we've already sent a channel_ready
5354                                 funding_tx_confirmations = 0;
5355                         }
5356
5357                         // If we've sent channel_ready (or have both sent and received channel_ready), and
5358                         // the funding transaction has become unconfirmed,
5359                         // close the channel and hope we can get the latest state on chain (because presumably
5360                         // the funding transaction is at least still in the mempool of most nodes).
5361                         //
5362                         // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
5363                         // 0-conf channel, but not doing so may lead to the
5364                         // `ChannelManager::short_to_chan_info` map  being inconsistent, so we currently have
5365                         // to.
5366                         if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
5367                                 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
5368                                         self.context.minimum_depth.unwrap(), funding_tx_confirmations);
5369                                 return Err(ClosureReason::ProcessingError { err: err_reason });
5370                         }
5371                 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
5372                                 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
5373                         log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
5374                         // If funding_tx_confirmed_in is unset, the channel must not be active
5375                         assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
5376                         assert!(!self.context.channel_state.is_our_channel_ready());
5377                         return Err(ClosureReason::FundingTimedOut);
5378                 }
5379
5380                 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
5381                         self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
5382                 } else { None };
5383                 Ok((None, timed_out_htlcs, announcement_sigs))
5384         }
5385
5386         /// Indicates the funding transaction is no longer confirmed in the main chain. This may
5387         /// force-close the channel, but may also indicate a harmless reorganization of a block or two
5388         /// before the channel has reached channel_ready and we can just wait for more blocks.
5389         pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
5390                 if self.context.funding_tx_confirmation_height != 0 {
5391                         // We handle the funding disconnection by calling best_block_updated with a height one
5392                         // below where our funding was connected, implying a reorg back to conf_height - 1.
5393                         let reorg_height = self.context.funding_tx_confirmation_height - 1;
5394                         // We use the time field to bump the current time we set on channel updates if its
5395                         // larger. If we don't know that time has moved forward, we can just set it to the last
5396                         // time we saw and it will be ignored.
5397                         let best_time = self.context.update_time_counter;
5398                         match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&NodeSigner, &UserConfig)>, logger) {
5399                                 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
5400                                         assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
5401                                         assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
5402                                         assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
5403                                         Ok(())
5404                                 },
5405                                 Err(e) => Err(e)
5406                         }
5407                 } else {
5408                         // We never learned about the funding confirmation anyway, just ignore
5409                         Ok(())
5410                 }
5411         }
5412
5413         // Methods to get unprompted messages to send to the remote end (or where we already returned
5414         // something in the handler for the message that prompted this message):
5415
5416         /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
5417         /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
5418         /// directions). Should be used for both broadcasted announcements and in response to an
5419         /// AnnouncementSignatures message from the remote peer.
5420         ///
5421         /// Will only fail if we're not in a state where channel_announcement may be sent (including
5422         /// closing).
5423         ///
5424         /// This will only return ChannelError::Ignore upon failure.
5425         ///
5426         /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
5427         fn get_channel_announcement<NS: Deref>(
5428                 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5429         ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5430                 if !self.context.config.announced_channel {
5431                         return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
5432                 }
5433                 if !self.context.is_usable() {
5434                         return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
5435                 }
5436
5437                 let short_channel_id = self.context.get_short_channel_id()
5438                         .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
5439                 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5440                         .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
5441                 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
5442                 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
5443
5444                 let msg = msgs::UnsignedChannelAnnouncement {
5445                         features: channelmanager::provided_channel_features(&user_config),
5446                         chain_hash,
5447                         short_channel_id,
5448                         node_id_1: if were_node_one { node_id } else { counterparty_node_id },
5449                         node_id_2: if were_node_one { counterparty_node_id } else { node_id },
5450                         bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
5451                         bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
5452                         excess_data: Vec::new(),
5453                 };
5454
5455                 Ok(msg)
5456         }
5457
5458         fn get_announcement_sigs<NS: Deref, L: Deref>(
5459                 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
5460                 best_block_height: u32, logger: &L
5461         ) -> Option<msgs::AnnouncementSignatures>
5462         where
5463                 NS::Target: NodeSigner,
5464                 L::Target: Logger
5465         {
5466                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5467                         return None;
5468                 }
5469
5470                 if !self.context.is_usable() {
5471                         return None;
5472                 }
5473
5474                 if self.context.channel_state.is_peer_disconnected() {
5475                         log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
5476                         return None;
5477                 }
5478
5479                 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
5480                         return None;
5481                 }
5482
5483                 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
5484                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5485                         Ok(a) => a,
5486                         Err(e) => {
5487                                 log_trace!(logger, "{:?}", e);
5488                                 return None;
5489                         }
5490                 };
5491                 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
5492                         Err(_) => {
5493                                 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
5494                                 return None;
5495                         },
5496                         Ok(v) => v
5497                 };
5498                 match &self.context.holder_signer {
5499                         ChannelSignerType::Ecdsa(ecdsa) => {
5500                                 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
5501                                         Err(_) => {
5502                                                 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
5503                                                 return None;
5504                                         },
5505                                         Ok(v) => v
5506                                 };
5507                                 let short_channel_id = match self.context.get_short_channel_id() {
5508                                         Some(scid) => scid,
5509                                         None => return None,
5510                                 };
5511
5512                                 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
5513
5514                                 Some(msgs::AnnouncementSignatures {
5515                                         channel_id: self.context.channel_id(),
5516                                         short_channel_id,
5517                                         node_signature: our_node_sig,
5518                                         bitcoin_signature: our_bitcoin_sig,
5519                                 })
5520                         },
5521                         // TODO (taproot|arik)
5522                         #[cfg(taproot)]
5523                         _ => todo!()
5524                 }
5525         }
5526
5527         /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
5528         /// available.
5529         fn sign_channel_announcement<NS: Deref>(
5530                 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
5531         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5532                 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
5533                         let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
5534                                 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
5535                         let were_node_one = announcement.node_id_1 == our_node_key;
5536
5537                         let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
5538                                 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
5539                         match &self.context.holder_signer {
5540                                 ChannelSignerType::Ecdsa(ecdsa) => {
5541                                         let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
5542                                                 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
5543                                         Ok(msgs::ChannelAnnouncement {
5544                                                 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
5545                                                 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
5546                                                 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
5547                                                 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
5548                                                 contents: announcement,
5549                                         })
5550                                 },
5551                                 // TODO (taproot|arik)
5552                                 #[cfg(taproot)]
5553                                 _ => todo!()
5554                         }
5555                 } else {
5556                         Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
5557                 }
5558         }
5559
5560         /// Processes an incoming announcement_signatures message, providing a fully-signed
5561         /// channel_announcement message which we can broadcast and storing our counterparty's
5562         /// signatures for later reconstruction/rebroadcast of the channel_announcement.
5563         pub fn announcement_signatures<NS: Deref>(
5564                 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
5565                 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
5566         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
5567                 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
5568
5569                 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
5570
5571                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
5572                         return Err(ChannelError::Close(format!(
5573                                 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
5574                                  &announcement, self.context.get_counterparty_node_id())));
5575                 }
5576                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
5577                         return Err(ChannelError::Close(format!(
5578                                 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
5579                                 &announcement, self.context.counterparty_funding_pubkey())));
5580                 }
5581
5582                 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
5583                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5584                         return Err(ChannelError::Ignore(
5585                                 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
5586                 }
5587
5588                 self.sign_channel_announcement(node_signer, announcement)
5589         }
5590
5591         /// Gets a signed channel_announcement for this channel, if we previously received an
5592         /// announcement_signatures from our counterparty.
5593         pub fn get_signed_channel_announcement<NS: Deref>(
5594                 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
5595         ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
5596                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
5597                         return None;
5598                 }
5599                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
5600                         Ok(res) => res,
5601                         Err(_) => return None,
5602                 };
5603                 match self.sign_channel_announcement(node_signer, announcement) {
5604                         Ok(res) => Some(res),
5605                         Err(_) => None,
5606                 }
5607         }
5608
5609         /// May panic if called on a channel that wasn't immediately-previously
5610         /// self.remove_uncommitted_htlcs_and_mark_paused()'d
5611         pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
5612                 assert!(self.context.channel_state.is_peer_disconnected());
5613                 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
5614                 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
5615                 // current to_remote balances. However, it no longer has any use, and thus is now simply
5616                 // set to a dummy (but valid, as required by the spec) public key.
5617                 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
5618                 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
5619                 // valid, and valid in fuzzing mode's arbitrary validity criteria:
5620                 let mut pk = [2; 33]; pk[1] = 0xff;
5621                 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
5622                 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
5623                         let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
5624                         log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
5625                         remote_last_secret
5626                 } else {
5627                         log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
5628                         [0;32]
5629                 };
5630                 self.mark_awaiting_response();
5631                 msgs::ChannelReestablish {
5632                         channel_id: self.context.channel_id(),
5633                         // The protocol has two different commitment number concepts - the "commitment
5634                         // transaction number", which starts from 0 and counts up, and the "revocation key
5635                         // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
5636                         // commitment transaction numbers by the index which will be used to reveal the
5637                         // revocation key for that commitment transaction, which means we have to convert them
5638                         // to protocol-level commitment numbers here...
5639
5640                         // next_local_commitment_number is the next commitment_signed number we expect to
5641                         // receive (indicating if they need to resend one that we missed).
5642                         next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
5643                         // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
5644                         // receive, however we track it by the next commitment number for a remote transaction
5645                         // (which is one further, as they always revoke previous commitment transaction, not
5646                         // the one we send) so we have to decrement by 1. Note that if
5647                         // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
5648                         // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
5649                         // overflow here.
5650                         next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
5651                         your_last_per_commitment_secret: remote_last_secret,
5652                         my_current_per_commitment_point: dummy_pubkey,
5653                         // TODO(dual_funding): If we've sent `commtiment_signed` for an interactive transaction
5654                         // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
5655                         // txid of that interactive transaction, else we MUST NOT set it.
5656                         next_funding_txid: None,
5657                 }
5658         }
5659
5660
5661         // Send stuff to our remote peers:
5662
5663         /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
5664         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5665         /// commitment update.
5666         ///
5667         /// `Err`s will only be [`ChannelError::Ignore`].
5668         pub fn queue_add_htlc<F: Deref, L: Deref>(
5669                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5670                 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5671                 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5672         ) -> Result<(), ChannelError>
5673         where F::Target: FeeEstimator, L::Target: Logger
5674         {
5675                 self
5676                         .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
5677                                 skimmed_fee_msat, blinding_point, fee_estimator, logger)
5678                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
5679                         .map_err(|err| {
5680                                 if let ChannelError::Ignore(_) = err { /* fine */ }
5681                                 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
5682                                 err
5683                         })
5684         }
5685
5686         /// Adds a pending outbound HTLC to this channel, note that you probably want
5687         /// [`Self::send_htlc_and_commit`] instead cause you'll want both messages at once.
5688         ///
5689         /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
5690         /// the wire:
5691         /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
5692         ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
5693         ///   awaiting ACK.
5694         /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
5695         ///   we may not yet have sent the previous commitment update messages and will need to
5696         ///   regenerate them.
5697         ///
5698         /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
5699         /// on this [`Channel`] if `force_holding_cell` is false.
5700         ///
5701         /// `Err`s will only be [`ChannelError::Ignore`].
5702         fn send_htlc<F: Deref, L: Deref>(
5703                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
5704                 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
5705                 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
5706                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5707         ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
5708         where F::Target: FeeEstimator, L::Target: Logger
5709         {
5710                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
5711                         self.context.channel_state.is_local_shutdown_sent() ||
5712                         self.context.channel_state.is_remote_shutdown_sent()
5713                 {
5714                         return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
5715                 }
5716                 let channel_total_msat = self.context.channel_value_satoshis * 1000;
5717                 if amount_msat > channel_total_msat {
5718                         return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
5719                 }
5720
5721                 if amount_msat == 0 {
5722                         return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
5723                 }
5724
5725                 let available_balances = self.context.get_available_balances(fee_estimator);
5726                 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
5727                         return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
5728                                 available_balances.next_outbound_htlc_minimum_msat)));
5729                 }
5730
5731                 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
5732                         return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
5733                                 available_balances.next_outbound_htlc_limit_msat)));
5734                 }
5735
5736                 if self.context.channel_state.is_peer_disconnected() {
5737                         // Note that this should never really happen, if we're !is_live() on receipt of an
5738                         // incoming HTLC for relay will result in us rejecting the HTLC and we won't allow
5739                         // the user to send directly into a !is_live() channel. However, if we
5740                         // disconnected during the time the previous hop was doing the commitment dance we may
5741                         // end up getting here after the forwarding delay. In any case, returning an
5742                         // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
5743                         return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
5744                 }
5745
5746                 let need_holding_cell = self.context.channel_state.should_force_holding_cell();
5747                 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
5748                         payment_hash, amount_msat,
5749                         if force_holding_cell { "into holding cell" }
5750                         else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
5751                         else { "to peer" });
5752
5753                 if need_holding_cell {
5754                         force_holding_cell = true;
5755                 }
5756
5757                 // Now update local state:
5758                 if force_holding_cell {
5759                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
5760                                 amount_msat,
5761                                 payment_hash,
5762                                 cltv_expiry,
5763                                 source,
5764                                 onion_routing_packet,
5765                                 skimmed_fee_msat,
5766                                 blinding_point,
5767                         });
5768                         return Ok(None);
5769                 }
5770
5771                 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
5772                         htlc_id: self.context.next_holder_htlc_id,
5773                         amount_msat,
5774                         payment_hash: payment_hash.clone(),
5775                         cltv_expiry,
5776                         state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
5777                         source,
5778                         blinding_point,
5779                         skimmed_fee_msat,
5780                 });
5781
5782                 let res = msgs::UpdateAddHTLC {
5783                         channel_id: self.context.channel_id,
5784                         htlc_id: self.context.next_holder_htlc_id,
5785                         amount_msat,
5786                         payment_hash,
5787                         cltv_expiry,
5788                         onion_routing_packet,
5789                         skimmed_fee_msat,
5790                         blinding_point,
5791                 };
5792                 self.context.next_holder_htlc_id += 1;
5793
5794                 Ok(Some(res))
5795         }
5796
5797         fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
5798                 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
5799                 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
5800                 // fail to generate this, we still are at least at a position where upgrading their status
5801                 // is acceptable.
5802                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
5803                         let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
5804                                 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
5805                         } else { None };
5806                         if let Some(state) = new_state {
5807                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
5808                                 htlc.state = state;
5809                         }
5810                 }
5811                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5812                         if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
5813                                 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
5814                                 // Grab the preimage, if it exists, instead of cloning
5815                                 let mut reason = OutboundHTLCOutcome::Success(None);
5816                                 mem::swap(outcome, &mut reason);
5817                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
5818                         }
5819                 }
5820                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5821                         if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
5822                                 debug_assert!(!self.context.is_outbound());
5823                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5824                                 self.context.feerate_per_kw = feerate;
5825                                 self.context.pending_update_fee = None;
5826                         }
5827                 }
5828                 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
5829
5830                 let (mut htlcs_ref, counterparty_commitment_tx) =
5831                         self.build_commitment_no_state_update(logger);
5832                 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
5833                 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
5834                         htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
5835
5836                 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
5837                         self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
5838                 }
5839
5840                 self.context.latest_monitor_update_id += 1;
5841                 let monitor_update = ChannelMonitorUpdate {
5842                         update_id: self.context.latest_monitor_update_id,
5843                         updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
5844                                 commitment_txid: counterparty_commitment_txid,
5845                                 htlc_outputs: htlcs.clone(),
5846                                 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
5847                                 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
5848                                 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
5849                                 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
5850                                 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
5851                         }]
5852                 };
5853                 self.context.channel_state.set_awaiting_remote_revoke();
5854                 monitor_update
5855         }
5856
5857         fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
5858         -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
5859         where L::Target: Logger
5860         {
5861                 let counterparty_keys = self.context.build_remote_transaction_keys();
5862                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5863                 let counterparty_commitment_tx = commitment_stats.tx;
5864
5865                 #[cfg(any(test, fuzzing))]
5866                 {
5867                         if !self.context.is_outbound() {
5868                                 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
5869                                 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
5870                                 if let Some(info) = projected_commit_tx_info {
5871                                         let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
5872                                         if info.total_pending_htlcs == total_pending_htlcs
5873                                                 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
5874                                                 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
5875                                                 && info.feerate == self.context.feerate_per_kw {
5876                                                         let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
5877                                                         assert_eq!(actual_fee, info.fee);
5878                                                 }
5879                                 }
5880                         }
5881                 }
5882
5883                 (commitment_stats.htlcs_included, counterparty_commitment_tx)
5884         }
5885
5886         /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
5887         /// generation when we shouldn't change HTLC/channel state.
5888         fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
5889                 // Get the fee tests from `build_commitment_no_state_update`
5890                 #[cfg(any(test, fuzzing))]
5891                 self.build_commitment_no_state_update(logger);
5892
5893                 let counterparty_keys = self.context.build_remote_transaction_keys();
5894                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
5895                 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
5896
5897                 match &self.context.holder_signer {
5898                         ChannelSignerType::Ecdsa(ecdsa) => {
5899                                 let (signature, htlc_signatures);
5900
5901                                 {
5902                                         let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
5903                                         for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
5904                                                 htlcs.push(htlc);
5905                                         }
5906
5907                                         let res = ecdsa.sign_counterparty_commitment(
5908                                                         &commitment_stats.tx,
5909                                                         commitment_stats.inbound_htlc_preimages,
5910                                                         commitment_stats.outbound_htlc_preimages,
5911                                                         &self.context.secp_ctx,
5912                                                 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
5913                                         signature = res.0;
5914                                         htlc_signatures = res.1;
5915
5916                                         log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
5917                                                 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
5918                                                 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
5919                                                 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
5920
5921                                         for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
5922                                                 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
5923                                                         encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
5924                                                         encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
5925                                                         log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
5926                                                         log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
5927                                         }
5928                                 }
5929
5930                                 Ok((msgs::CommitmentSigned {
5931                                         channel_id: self.context.channel_id,
5932                                         signature,
5933                                         htlc_signatures,
5934                                         #[cfg(taproot)]
5935                                         partial_signature_with_nonce: None,
5936                                 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
5937                         },
5938                         // TODO (taproot|arik)
5939                         #[cfg(taproot)]
5940                         _ => todo!()
5941                 }
5942         }
5943
5944         /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
5945         /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
5946         ///
5947         /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
5948         /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
5949         pub fn send_htlc_and_commit<F: Deref, L: Deref>(
5950                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
5951                 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
5952                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5953         ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
5954         where F::Target: FeeEstimator, L::Target: Logger
5955         {
5956                 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
5957                         onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
5958                 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
5959                 match send_res? {
5960                         Some(_) => {
5961                                 let monitor_update = self.build_commitment_no_status_check(logger);
5962                                 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
5963                                 Ok(self.push_ret_blockable_mon_update(monitor_update))
5964                         },
5965                         None => Ok(None)
5966                 }
5967         }
5968
5969         /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
5970         /// happened.
5971         pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
5972                 let new_forwarding_info = Some(CounterpartyForwardingInfo {
5973                         fee_base_msat: msg.contents.fee_base_msat,
5974                         fee_proportional_millionths: msg.contents.fee_proportional_millionths,
5975                         cltv_expiry_delta: msg.contents.cltv_expiry_delta
5976                 });
5977                 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
5978                 if did_change {
5979                         self.context.counterparty_forwarding_info = new_forwarding_info;
5980                 }
5981
5982                 Ok(did_change)
5983         }
5984
5985         /// Begins the shutdown process, getting a message for the remote peer and returning all
5986         /// holding cell HTLCs for payment failure.
5987         pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
5988                 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
5989         -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
5990         {
5991                 for htlc in self.context.pending_outbound_htlcs.iter() {
5992                         if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
5993                                 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
5994                         }
5995                 }
5996                 if self.context.channel_state.is_local_shutdown_sent() {
5997                         return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
5998                 }
5999                 else if self.context.channel_state.is_remote_shutdown_sent() {
6000                         return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6001                 }
6002                 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6003                         return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6004                 }
6005                 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6006                 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6007                         return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6008                 }
6009
6010                 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6011                         Some(_) => false,
6012                         None => {
6013                                 // use override shutdown script if provided
6014                                 let shutdown_scriptpubkey = match override_shutdown_script {
6015                                         Some(script) => script,
6016                                         None => {
6017                                                 // otherwise, use the shutdown scriptpubkey provided by the signer
6018                                                 match signer_provider.get_shutdown_scriptpubkey() {
6019                                                         Ok(scriptpubkey) => scriptpubkey,
6020                                                         Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6021                                                 }
6022                                         },
6023                                 };
6024                                 if !shutdown_scriptpubkey.is_compatible(their_features) {
6025                                         return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6026                                 }
6027                                 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6028                                 true
6029                         },
6030                 };
6031
6032                 // From here on out, we may not fail!
6033                 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6034                 self.context.channel_state.set_local_shutdown_sent();
6035                 self.context.update_time_counter += 1;
6036
6037                 let monitor_update = if update_shutdown_script {
6038                         self.context.latest_monitor_update_id += 1;
6039                         let monitor_update = ChannelMonitorUpdate {
6040                                 update_id: self.context.latest_monitor_update_id,
6041                                 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6042                                         scriptpubkey: self.get_closing_scriptpubkey(),
6043                                 }],
6044                         };
6045                         self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6046                         self.push_ret_blockable_mon_update(monitor_update)
6047                 } else { None };
6048                 let shutdown = msgs::Shutdown {
6049                         channel_id: self.context.channel_id,
6050                         scriptpubkey: self.get_closing_scriptpubkey(),
6051                 };
6052
6053                 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6054                 // our shutdown until we've committed all of the pending changes.
6055                 self.context.holding_cell_update_fee = None;
6056                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6057                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6058                         match htlc_update {
6059                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6060                                         dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6061                                         false
6062                                 },
6063                                 _ => true
6064                         }
6065                 });
6066
6067                 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6068                         "we can't both complete shutdown and return a monitor update");
6069
6070                 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
6071         }
6072
6073         pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6074                 self.context.holding_cell_htlc_updates.iter()
6075                         .flat_map(|htlc_update| {
6076                                 match htlc_update {
6077                                         HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6078                                                 => Some((source, payment_hash)),
6079                                         _ => None,
6080                                 }
6081                         })
6082                         .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6083         }
6084 }
6085
6086 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6087 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6088         pub context: ChannelContext<SP>,
6089         pub unfunded_context: UnfundedChannelContext,
6090 }
6091
6092 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
6093         pub fn new<ES: Deref, F: Deref>(
6094                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6095                 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6096                 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6097         ) -> Result<OutboundV1Channel<SP>, APIError>
6098         where ES::Target: EntropySource,
6099               F::Target: FeeEstimator
6100         {
6101                 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6102                 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6103                 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6104                 let pubkeys = holder_signer.pubkeys().clone();
6105
6106                 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6107                         return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6108                 }
6109                 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6110                         return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6111                 }
6112                 let channel_value_msat = channel_value_satoshis * 1000;
6113                 if push_msat > channel_value_msat {
6114                         return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6115                 }
6116                 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6117                         return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
6118                 }
6119                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6120                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6121                         // Protocol level safety check in place, although it should never happen because
6122                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6123                         return Err(APIError::APIMisuseError { err: format!("Holder selected channel  reserve below implemention limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
6124                 }
6125
6126                 let channel_type = Self::get_initial_channel_type(&config, their_features);
6127                 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6128
6129                 let (commitment_conf_target, anchor_outputs_value_msat)  = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6130                         (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6131                 } else {
6132                         (ConfirmationTarget::NonAnchorChannelFee, 0)
6133                 };
6134                 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
6135
6136                 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6137                 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6138                 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6139                         return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6140                 }
6141
6142                 let mut secp_ctx = Secp256k1::new();
6143                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6144
6145                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6146                         match signer_provider.get_shutdown_scriptpubkey() {
6147                                 Ok(scriptpubkey) => Some(scriptpubkey),
6148                                 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6149                         }
6150                 } else { None };
6151
6152                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6153                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
6154                                 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6155                         }
6156                 }
6157
6158                 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6159                         Ok(script) => script,
6160                         Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6161                 };
6162
6163                 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6164
6165                 Ok(Self {
6166                         context: ChannelContext {
6167                                 user_id,
6168
6169                                 config: LegacyChannelConfig {
6170                                         options: config.channel_config.clone(),
6171                                         announced_channel: config.channel_handshake_config.announced_channel,
6172                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6173                                 },
6174
6175                                 prev_config: None,
6176
6177                                 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
6178
6179                                 channel_id: temporary_channel_id,
6180                                 temporary_channel_id: Some(temporary_channel_id),
6181                                 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
6182                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
6183                                 secp_ctx,
6184                                 channel_value_satoshis,
6185
6186                                 latest_monitor_update_id: 0,
6187
6188                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6189                                 shutdown_scriptpubkey,
6190                                 destination_script,
6191
6192                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6193                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6194                                 value_to_self_msat,
6195
6196                                 pending_inbound_htlcs: Vec::new(),
6197                                 pending_outbound_htlcs: Vec::new(),
6198                                 holding_cell_htlc_updates: Vec::new(),
6199                                 pending_update_fee: None,
6200                                 holding_cell_update_fee: None,
6201                                 next_holder_htlc_id: 0,
6202                                 next_counterparty_htlc_id: 0,
6203                                 update_time_counter: 1,
6204
6205                                 resend_order: RAACommitmentOrder::CommitmentFirst,
6206
6207                                 monitor_pending_channel_ready: false,
6208                                 monitor_pending_revoke_and_ack: false,
6209                                 monitor_pending_commitment_signed: false,
6210                                 monitor_pending_forwards: Vec::new(),
6211                                 monitor_pending_failures: Vec::new(),
6212                                 monitor_pending_finalized_fulfills: Vec::new(),
6213
6214                                 signer_pending_commitment_update: false,
6215                                 signer_pending_funding: false,
6216
6217                                 #[cfg(debug_assertions)]
6218                                 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6219                                 #[cfg(debug_assertions)]
6220                                 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
6221
6222                                 last_sent_closing_fee: None,
6223                                 pending_counterparty_closing_signed: None,
6224                                 expecting_peer_commitment_signed: false,
6225                                 closing_fee_limits: None,
6226                                 target_closing_feerate_sats_per_kw: None,
6227
6228                                 funding_tx_confirmed_in: None,
6229                                 funding_tx_confirmation_height: 0,
6230                                 short_channel_id: None,
6231                                 channel_creation_height: current_chain_height,
6232
6233                                 feerate_per_kw: commitment_feerate,
6234                                 counterparty_dust_limit_satoshis: 0,
6235                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
6236                                 counterparty_max_htlc_value_in_flight_msat: 0,
6237                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
6238                                 counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
6239                                 holder_selected_channel_reserve_satoshis,
6240                                 counterparty_htlc_minimum_msat: 0,
6241                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
6242                                 counterparty_max_accepted_htlcs: 0,
6243                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
6244                                 minimum_depth: None, // Filled in in accept_channel
6245
6246                                 counterparty_forwarding_info: None,
6247
6248                                 channel_transaction_parameters: ChannelTransactionParameters {
6249                                         holder_pubkeys: pubkeys,
6250                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
6251                                         is_outbound_from_holder: true,
6252                                         counterparty_parameters: None,
6253                                         funding_outpoint: None,
6254                                         channel_type_features: channel_type.clone()
6255                                 },
6256                                 funding_transaction: None,
6257                                 is_batch_funding: None,
6258
6259                                 counterparty_cur_commitment_point: None,
6260                                 counterparty_prev_commitment_point: None,
6261                                 counterparty_node_id,
6262
6263                                 counterparty_shutdown_scriptpubkey: None,
6264
6265                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
6266
6267                                 channel_update_status: ChannelUpdateStatus::Enabled,
6268                                 closing_signed_in_flight: false,
6269
6270                                 announcement_sigs: None,
6271
6272                                 #[cfg(any(test, fuzzing))]
6273                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
6274                                 #[cfg(any(test, fuzzing))]
6275                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
6276
6277                                 workaround_lnd_bug_4006: None,
6278                                 sent_message_awaiting_response: None,
6279
6280                                 latest_inbound_scid_alias: None,
6281                                 outbound_scid_alias,
6282
6283                                 channel_pending_event_emitted: false,
6284                                 channel_ready_event_emitted: false,
6285
6286                                 #[cfg(any(test, fuzzing))]
6287                                 historical_inbound_htlc_fulfills: HashSet::new(),
6288
6289                                 channel_type,
6290                                 channel_keys_id,
6291
6292                                 blocked_monitor_updates: Vec::new(),
6293                         },
6294                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
6295                 })
6296         }
6297
6298         /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
6299         fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6300                 let counterparty_keys = self.context.build_remote_transaction_keys();
6301                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6302                 let signature = match &self.context.holder_signer {
6303                         // TODO (taproot|arik): move match into calling method for Taproot
6304                         ChannelSignerType::Ecdsa(ecdsa) => {
6305                                 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
6306                                         .map(|(sig, _)| sig).ok()?
6307                         },
6308                         // TODO (taproot|arik)
6309                         #[cfg(taproot)]
6310                         _ => todo!()
6311                 };
6312
6313                 if self.context.signer_pending_funding {
6314                         log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
6315                         self.context.signer_pending_funding = false;
6316                 }
6317
6318                 Some(msgs::FundingCreated {
6319                         temporary_channel_id: self.context.temporary_channel_id.unwrap(),
6320                         funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
6321                         funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
6322                         signature,
6323                         #[cfg(taproot)]
6324                         partial_signature_with_nonce: None,
6325                         #[cfg(taproot)]
6326                         next_local_nonce: None,
6327                 })
6328         }
6329
6330         /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
6331         /// a funding_created message for the remote peer.
6332         /// Panics if called at some time other than immediately after initial handshake, if called twice,
6333         /// or if called on an inbound channel.
6334         /// Note that channel_id changes during this call!
6335         /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
6336         /// If an Err is returned, it is a ChannelError::Close.
6337         pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
6338         -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
6339                 if !self.context.is_outbound() {
6340                         panic!("Tried to create outbound funding_created message on an inbound channel!");
6341                 }
6342                 if !matches!(
6343                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6344                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
6345                 ) {
6346                         panic!("Tried to get a funding_created messsage at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
6347                 }
6348                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6349                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6350                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6351                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6352                 }
6353
6354                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
6355                 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
6356
6357                 // Now that we're past error-generating stuff, update our local state:
6358
6359                 self.context.channel_state = ChannelState::FundingNegotiated;
6360                 self.context.channel_id = funding_txo.to_channel_id();
6361
6362                 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
6363                 // We can skip this if it is a zero-conf channel.
6364                 if funding_transaction.is_coin_base() &&
6365                         self.context.minimum_depth.unwrap_or(0) > 0 &&
6366                         self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6367                         self.context.minimum_depth = Some(COINBASE_MATURITY);
6368                 }
6369
6370                 self.context.funding_transaction = Some(funding_transaction);
6371                 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
6372
6373                 let funding_created = self.get_funding_created_msg(logger);
6374                 if funding_created.is_none() {
6375                         if !self.context.signer_pending_funding {
6376                                 log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
6377                                 self.context.signer_pending_funding = true;
6378                         }
6379                 }
6380
6381                 Ok(funding_created)
6382         }
6383
6384         fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
6385                 // The default channel type (ie the first one we try) depends on whether the channel is
6386                 // public - if it is, we just go with `only_static_remotekey` as it's the only option
6387                 // available. If it's private, we first try `scid_privacy` as it provides better privacy
6388                 // with no other changes, and fall back to `only_static_remotekey`.
6389                 let mut ret = ChannelTypeFeatures::only_static_remote_key();
6390                 if !config.channel_handshake_config.announced_channel &&
6391                         config.channel_handshake_config.negotiate_scid_privacy &&
6392                         their_features.supports_scid_privacy() {
6393                         ret.set_scid_privacy_required();
6394                 }
6395
6396                 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
6397                 // set it now. If they don't understand it, we'll fall back to our default of
6398                 // `only_static_remotekey`.
6399                 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
6400                         their_features.supports_anchors_zero_fee_htlc_tx() {
6401                         ret.set_anchors_zero_fee_htlc_tx_required();
6402                 }
6403
6404                 ret
6405         }
6406
6407         /// If we receive an error message, it may only be a rejection of the channel type we tried,
6408         /// not of our ability to open any channel at all. Thus, on error, we should first call this
6409         /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
6410         pub(crate) fn maybe_handle_error_without_close<F: Deref>(
6411                 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
6412         ) -> Result<msgs::OpenChannel, ()>
6413         where
6414                 F::Target: FeeEstimator
6415         {
6416                 if !self.context.is_outbound() ||
6417                         !matches!(
6418                                 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
6419                                 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
6420                         )
6421                 {
6422                         return Err(());
6423                 }
6424                 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
6425                         // We've exhausted our options
6426                         return Err(());
6427                 }
6428                 // We support opening a few different types of channels. Try removing our additional
6429                 // features one by one until we've either arrived at our default or the counterparty has
6430                 // accepted one.
6431                 //
6432                 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
6433                 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
6434                 // checks whether the counterparty supports every feature, this would only happen if the
6435                 // counterparty is advertising the feature, but rejecting channels proposing the feature for
6436                 // whatever reason.
6437                 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
6438                         self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
6439                         self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
6440                         assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
6441                 } else if self.context.channel_type.supports_scid_privacy() {
6442                         self.context.channel_type.clear_scid_privacy();
6443                 } else {
6444                         self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
6445                 }
6446                 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
6447                 Ok(self.get_open_channel(chain_hash))
6448         }
6449
6450         pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
6451                 if !self.context.is_outbound() {
6452                         panic!("Tried to open a channel for an inbound channel?");
6453                 }
6454                 if self.context.have_received_message() {
6455                         panic!("Cannot generate an open_channel after we've moved forward");
6456                 }
6457
6458                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6459                         panic!("Tried to send an open_channel for a channel that has already advanced");
6460                 }
6461
6462                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
6463                 let keys = self.context.get_holder_pubkeys();
6464
6465                 msgs::OpenChannel {
6466                         chain_hash,
6467                         temporary_channel_id: self.context.channel_id,
6468                         funding_satoshis: self.context.channel_value_satoshis,
6469                         push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
6470                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
6471                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
6472                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
6473                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
6474                         feerate_per_kw: self.context.feerate_per_kw as u32,
6475                         to_self_delay: self.context.get_holder_selected_contest_delay(),
6476                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
6477                         funding_pubkey: keys.funding_pubkey,
6478                         revocation_basepoint: keys.revocation_basepoint.to_public_key(),
6479                         payment_point: keys.payment_point,
6480                         delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
6481                         htlc_basepoint: keys.htlc_basepoint.to_public_key(),
6482                         first_per_commitment_point,
6483                         channel_flags: if self.context.config.announced_channel {1} else {0},
6484                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
6485                                 Some(script) => script.clone().into_inner(),
6486                                 None => Builder::new().into_script(),
6487                         }),
6488                         channel_type: Some(self.context.channel_type.clone()),
6489                 }
6490         }
6491
6492         // Message handlers
6493         pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
6494                 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
6495
6496                 // Check sanity of message fields:
6497                 if !self.context.is_outbound() {
6498                         return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
6499                 }
6500                 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
6501                         return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
6502                 }
6503                 if msg.dust_limit_satoshis > 21000000 * 100000000 {
6504                         return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.dust_limit_satoshis)));
6505                 }
6506                 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
6507                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
6508                 }
6509                 if msg.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
6510                         return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
6511                 }
6512                 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
6513                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
6514                                 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
6515                 }
6516                 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
6517                 if msg.htlc_minimum_msat >= full_channel_value_msat {
6518                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6519                 }
6520                 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6521                 if msg.to_self_delay > max_delay_acceptable {
6522                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.to_self_delay)));
6523                 }
6524                 if msg.max_accepted_htlcs < 1 {
6525                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6526                 }
6527                 if msg.max_accepted_htlcs > MAX_HTLCS {
6528                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6529                 }
6530
6531                 // Now check against optional parameters as set by config...
6532                 if msg.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
6533                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
6534                 }
6535                 if msg.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
6536                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
6537                 }
6538                 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
6539                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
6540                 }
6541                 if msg.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
6542                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
6543                 }
6544                 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6545                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6546                 }
6547                 if msg.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
6548                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6549                 }
6550                 if msg.minimum_depth > peer_limits.max_minimum_depth {
6551                         return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.minimum_depth)));
6552                 }
6553
6554                 if let Some(ty) = &msg.channel_type {
6555                         if *ty != self.context.channel_type {
6556                                 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
6557                         }
6558                 } else if their_features.supports_channel_type() {
6559                         // Assume they've accepted the channel type as they said they understand it.
6560                 } else {
6561                         let channel_type = ChannelTypeFeatures::from_init(&their_features);
6562                         if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6563                                 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6564                         }
6565                         self.context.channel_type = channel_type.clone();
6566                         self.context.channel_transaction_parameters.channel_type_features = channel_type;
6567                 }
6568
6569                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6570                         match &msg.shutdown_scriptpubkey {
6571                                 &Some(ref script) => {
6572                                         // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6573                                         if script.len() == 0 {
6574                                                 None
6575                                         } else {
6576                                                 if !script::is_bolt2_compliant(&script, their_features) {
6577                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
6578                                                 }
6579                                                 Some(script.clone())
6580                                         }
6581                                 },
6582                                 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6583                                 &None => {
6584                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6585                                 }
6586                         }
6587                 } else { None };
6588
6589                 self.context.counterparty_dust_limit_satoshis = msg.dust_limit_satoshis;
6590                 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
6591                 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
6592                 self.context.counterparty_htlc_minimum_msat = msg.htlc_minimum_msat;
6593                 self.context.counterparty_max_accepted_htlcs = msg.max_accepted_htlcs;
6594
6595                 if peer_limits.trust_own_funding_0conf {
6596                         self.context.minimum_depth = Some(msg.minimum_depth);
6597                 } else {
6598                         self.context.minimum_depth = Some(cmp::max(1, msg.minimum_depth));
6599                 }
6600
6601                 let counterparty_pubkeys = ChannelPublicKeys {
6602                         funding_pubkey: msg.funding_pubkey,
6603                         revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6604                         payment_point: msg.payment_point,
6605                         delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6606                         htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6607                 };
6608
6609                 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
6610                         selected_contest_delay: msg.to_self_delay,
6611                         pubkeys: counterparty_pubkeys,
6612                 });
6613
6614                 self.context.counterparty_cur_commitment_point = Some(msg.first_per_commitment_point);
6615                 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
6616
6617                 self.context.channel_state = ChannelState::NegotiatingFunding(
6618                         NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6619                 );
6620                 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
6621
6622                 Ok(())
6623         }
6624
6625         /// Handles a funding_signed message from the remote end.
6626         /// If this call is successful, broadcast the funding transaction (and not before!)
6627         pub fn funding_signed<L: Deref>(
6628                 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
6629         ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
6630         where
6631                 L::Target: Logger
6632         {
6633                 if !self.context.is_outbound() {
6634                         return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
6635                 }
6636                 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
6637                         return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
6638                 }
6639                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
6640                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
6641                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
6642                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
6643                 }
6644
6645                 let funding_script = self.context.get_funding_redeemscript();
6646
6647                 let counterparty_keys = self.context.build_remote_transaction_keys();
6648                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
6649                 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
6650                 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
6651
6652                 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
6653                         &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
6654
6655                 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
6656                 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
6657                 {
6658                         let trusted_tx = initial_commitment_tx.trust();
6659                         let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
6660                         let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
6661                         // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
6662                         if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
6663                                 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
6664                         }
6665                 }
6666
6667                 let holder_commitment_tx = HolderCommitmentTransaction::new(
6668                         initial_commitment_tx,
6669                         msg.signature,
6670                         Vec::new(),
6671                         &self.context.get_holder_pubkeys().funding_pubkey,
6672                         self.context.counterparty_funding_pubkey()
6673                 );
6674
6675                 let validated =
6676                         self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
6677                 if validated.is_err() {
6678                         return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
6679                 }
6680
6681                 let funding_redeemscript = self.context.get_funding_redeemscript();
6682                 let funding_txo = self.context.get_funding_txo().unwrap();
6683                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
6684                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
6685                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
6686                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
6687                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
6688                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
6689                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
6690                                                           &self.context.destination_script, (funding_txo, funding_txo_script),
6691                                                           &self.context.channel_transaction_parameters,
6692                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
6693                                                           obscure_factor,
6694                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id);
6695                 channel_monitor.provide_initial_counterparty_commitment_tx(
6696                         counterparty_initial_bitcoin_tx.txid, Vec::new(),
6697                         self.context.cur_counterparty_commitment_transaction_number,
6698                         self.context.counterparty_cur_commitment_point.unwrap(),
6699                         counterparty_initial_commitment_tx.feerate_per_kw(),
6700                         counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
6701                         counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
6702
6703                 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have no had any monitor(s) yet to fail update!
6704                 if self.context.is_batch_funding() {
6705                         self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
6706                 } else {
6707                         self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
6708                 }
6709                 self.context.cur_holder_commitment_transaction_number -= 1;
6710                 self.context.cur_counterparty_commitment_transaction_number -= 1;
6711
6712                 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
6713
6714                 let mut channel = Channel { context: self.context };
6715
6716                 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
6717                 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
6718                 Ok((channel, channel_monitor))
6719         }
6720
6721         /// Indicates that the signer may have some signatures for us, so we should retry if we're
6722         /// blocked.
6723         #[allow(unused)]
6724         pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
6725                 if self.context.signer_pending_funding && self.context.is_outbound() {
6726                         log_trace!(logger, "Signer unblocked a funding_created");
6727                         self.get_funding_created_msg(logger)
6728                 } else { None }
6729         }
6730 }
6731
6732 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
6733 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6734         pub context: ChannelContext<SP>,
6735         pub unfunded_context: UnfundedChannelContext,
6736 }
6737
6738 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
6739         /// Creates a new channel from a remote sides' request for one.
6740         /// Assumes chain_hash has already been checked and corresponds with what we expect!
6741         pub fn new<ES: Deref, F: Deref, L: Deref>(
6742                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
6743                 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
6744                 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
6745                 current_chain_height: u32, logger: &L, is_0conf: bool,
6746         ) -> Result<InboundV1Channel<SP>, ChannelError>
6747                 where ES::Target: EntropySource,
6748                           F::Target: FeeEstimator,
6749                           L::Target: Logger,
6750         {
6751                 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.temporary_channel_id));
6752                 let announced_channel = if (msg.channel_flags & 1) == 1 { true } else { false };
6753
6754                 // First check the channel type is known, failing before we do anything else if we don't
6755                 // support this channel type.
6756                 let channel_type = if let Some(channel_type) = &msg.channel_type {
6757                         if channel_type.supports_any_optional_bits() {
6758                                 return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
6759                         }
6760
6761                         // We only support the channel types defined by the `ChannelManager` in
6762                         // `provided_channel_type_features`. The channel type must always support
6763                         // `static_remote_key`.
6764                         if !channel_type.requires_static_remote_key() {
6765                                 return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
6766                         }
6767                         // Make sure we support all of the features behind the channel type.
6768                         if !channel_type.is_subset(our_supported_features) {
6769                                 return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
6770                         }
6771                         if channel_type.requires_scid_privacy() && announced_channel {
6772                                 return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
6773                         }
6774                         channel_type.clone()
6775                 } else {
6776                         let channel_type = ChannelTypeFeatures::from_init(&their_features);
6777                         if channel_type != ChannelTypeFeatures::only_static_remote_key() {
6778                                 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
6779                         }
6780                         channel_type
6781                 };
6782
6783                 let channel_keys_id = signer_provider.generate_channel_keys_id(true, msg.funding_satoshis, user_id);
6784                 let holder_signer = signer_provider.derive_channel_signer(msg.funding_satoshis, channel_keys_id);
6785                 let pubkeys = holder_signer.pubkeys().clone();
6786                 let counterparty_pubkeys = ChannelPublicKeys {
6787                         funding_pubkey: msg.funding_pubkey,
6788                         revocation_basepoint: RevocationBasepoint::from(msg.revocation_basepoint),
6789                         payment_point: msg.payment_point,
6790                         delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.delayed_payment_basepoint),
6791                         htlc_basepoint: HtlcBasepoint::from(msg.htlc_basepoint)
6792                 };
6793
6794                 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
6795                         return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
6796                 }
6797
6798                 // Check sanity of message fields:
6799                 if msg.funding_satoshis > config.channel_handshake_limits.max_funding_satoshis {
6800                         return Err(ChannelError::Close(format!("Per our config, funding must be at most {}. It was {}", config.channel_handshake_limits.max_funding_satoshis, msg.funding_satoshis)));
6801                 }
6802                 if msg.funding_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6803                         return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", msg.funding_satoshis)));
6804                 }
6805                 if msg.channel_reserve_satoshis > msg.funding_satoshis {
6806                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be not greater than funding_satoshis: {}", msg.channel_reserve_satoshis, msg.funding_satoshis)));
6807                 }
6808                 let full_channel_value_msat = (msg.funding_satoshis - msg.channel_reserve_satoshis) * 1000;
6809                 if msg.push_msat > full_channel_value_msat {
6810                         return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg.push_msat, full_channel_value_msat)));
6811                 }
6812                 if msg.dust_limit_satoshis > msg.funding_satoshis {
6813                         return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than funding_satoshis {}. Peer never wants payout outputs?", msg.dust_limit_satoshis, msg.funding_satoshis)));
6814                 }
6815                 if msg.htlc_minimum_msat >= full_channel_value_msat {
6816                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", msg.htlc_minimum_msat, full_channel_value_msat)));
6817                 }
6818                 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, msg.feerate_per_kw, None, &&logger)?;
6819
6820                 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
6821                 if msg.to_self_delay > max_counterparty_selected_contest_delay {
6822                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, msg.to_self_delay)));
6823                 }
6824                 if msg.max_accepted_htlcs < 1 {
6825                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
6826                 }
6827                 if msg.max_accepted_htlcs > MAX_HTLCS {
6828                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.max_accepted_htlcs, MAX_HTLCS)));
6829                 }
6830
6831                 // Now check against optional parameters as set by config...
6832                 if msg.funding_satoshis < config.channel_handshake_limits.min_funding_satoshis {
6833                         return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", msg.funding_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
6834                 }
6835                 if msg.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
6836                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.htlc_minimum_msat,  config.channel_handshake_limits.max_htlc_minimum_msat)));
6837                 }
6838                 if msg.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
6839                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
6840                 }
6841                 if msg.channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
6842                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
6843                 }
6844                 if msg.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
6845                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
6846                 }
6847                 if msg.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6848                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6849                 }
6850                 if msg.dust_limit_satoshis >  MAX_CHAN_DUST_LIMIT_SATOSHIS {
6851                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
6852                 }
6853
6854                 // Convert things into internal flags and prep our state:
6855
6856                 if config.channel_handshake_limits.force_announced_channel_preference {
6857                         if config.channel_handshake_config.announced_channel != announced_channel {
6858                                 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
6859                         }
6860                 }
6861
6862                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.funding_satoshis, config);
6863                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6864                         // Protocol level safety check in place, although it should never happen because
6865                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6866                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
6867                 }
6868                 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
6869                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg.push_msat)));
6870                 }
6871                 if msg.channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6872                         log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
6873                                 msg.channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
6874                 }
6875                 if holder_selected_channel_reserve_satoshis < msg.dust_limit_satoshis {
6876                         return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", msg.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
6877                 }
6878
6879                 // check if the funder's amount for the initial commitment tx is sufficient
6880                 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
6881                 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6882                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2
6883                 } else {
6884                         0
6885                 };
6886                 let funders_amount_msat = msg.funding_satoshis * 1000 - msg.push_msat;
6887                 let commitment_tx_fee = commit_tx_fee_msat(msg.feerate_per_kw, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
6888                 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
6889                         return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
6890                 }
6891
6892                 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
6893                 // While it's reasonable for us to not meet the channel reserve initially (if they don't
6894                 // want to push much to us), our counterparty should always have more than our reserve.
6895                 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
6896                         return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
6897                 }
6898
6899                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
6900                         match &msg.shutdown_scriptpubkey {
6901                                 &Some(ref script) => {
6902                                         // Peer is signaling upfront_shutdown and has opt-out with a 0-length script. We don't enforce anything
6903                                         if script.len() == 0 {
6904                                                 None
6905                                         } else {
6906                                                 if !script::is_bolt2_compliant(&script, their_features) {
6907                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
6908                                                 }
6909                                                 Some(script.clone())
6910                                         }
6911                                 },
6912                                 // Peer is signaling upfront shutdown but don't opt-out with correct mechanism (a.k.a 0-length script). Peer looks buggy, we fail the channel
6913                                 &None => {
6914                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
6915                                 }
6916                         }
6917                 } else { None };
6918
6919                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6920                         match signer_provider.get_shutdown_scriptpubkey() {
6921                                 Ok(scriptpubkey) => Some(scriptpubkey),
6922                                 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
6923                         }
6924                 } else { None };
6925
6926                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6927                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
6928                                 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
6929                         }
6930                 }
6931
6932                 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6933                         Ok(script) => script,
6934                         Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
6935                 };
6936
6937                 let mut secp_ctx = Secp256k1::new();
6938                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6939
6940                 let minimum_depth = if is_0conf {
6941                         Some(0)
6942                 } else {
6943                         Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
6944                 };
6945
6946                 let chan = Self {
6947                         context: ChannelContext {
6948                                 user_id,
6949
6950                                 config: LegacyChannelConfig {
6951                                         options: config.channel_config.clone(),
6952                                         announced_channel,
6953                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
6954                                 },
6955
6956                                 prev_config: None,
6957
6958                                 inbound_handshake_limits_override: None,
6959
6960                                 temporary_channel_id: Some(msg.temporary_channel_id),
6961                                 channel_id: msg.temporary_channel_id,
6962                                 channel_state: ChannelState::NegotiatingFunding(
6963                                         NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
6964                                 ),
6965                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
6966                                 secp_ctx,
6967
6968                                 latest_monitor_update_id: 0,
6969
6970                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
6971                                 shutdown_scriptpubkey,
6972                                 destination_script,
6973
6974                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6975                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
6976                                 value_to_self_msat: msg.push_msat,
6977
6978                                 pending_inbound_htlcs: Vec::new(),
6979                                 pending_outbound_htlcs: Vec::new(),
6980                                 holding_cell_htlc_updates: Vec::new(),
6981                                 pending_update_fee: None,
6982                                 holding_cell_update_fee: None,
6983                                 next_holder_htlc_id: 0,
6984                                 next_counterparty_htlc_id: 0,
6985                                 update_time_counter: 1,
6986
6987                                 resend_order: RAACommitmentOrder::CommitmentFirst,
6988
6989                                 monitor_pending_channel_ready: false,
6990                                 monitor_pending_revoke_and_ack: false,
6991                                 monitor_pending_commitment_signed: false,
6992                                 monitor_pending_forwards: Vec::new(),
6993                                 monitor_pending_failures: Vec::new(),
6994                                 monitor_pending_finalized_fulfills: Vec::new(),
6995
6996                                 signer_pending_commitment_update: false,
6997                                 signer_pending_funding: false,
6998
6999                                 #[cfg(debug_assertions)]
7000                                 holder_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7001                                 #[cfg(debug_assertions)]
7002                                 counterparty_max_commitment_tx_output: Mutex::new((msg.push_msat, msg.funding_satoshis * 1000 - msg.push_msat)),
7003
7004                                 last_sent_closing_fee: None,
7005                                 pending_counterparty_closing_signed: None,
7006                                 expecting_peer_commitment_signed: false,
7007                                 closing_fee_limits: None,
7008                                 target_closing_feerate_sats_per_kw: None,
7009
7010                                 funding_tx_confirmed_in: None,
7011                                 funding_tx_confirmation_height: 0,
7012                                 short_channel_id: None,
7013                                 channel_creation_height: current_chain_height,
7014
7015                                 feerate_per_kw: msg.feerate_per_kw,
7016                                 channel_value_satoshis: msg.funding_satoshis,
7017                                 counterparty_dust_limit_satoshis: msg.dust_limit_satoshis,
7018                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7019                                 counterparty_max_htlc_value_in_flight_msat: cmp::min(msg.max_htlc_value_in_flight_msat, msg.funding_satoshis * 1000),
7020                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(msg.funding_satoshis, &config.channel_handshake_config),
7021                                 counterparty_selected_channel_reserve_satoshis: Some(msg.channel_reserve_satoshis),
7022                                 holder_selected_channel_reserve_satoshis,
7023                                 counterparty_htlc_minimum_msat: msg.htlc_minimum_msat,
7024                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7025                                 counterparty_max_accepted_htlcs: msg.max_accepted_htlcs,
7026                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
7027                                 minimum_depth,
7028
7029                                 counterparty_forwarding_info: None,
7030
7031                                 channel_transaction_parameters: ChannelTransactionParameters {
7032                                         holder_pubkeys: pubkeys,
7033                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7034                                         is_outbound_from_holder: false,
7035                                         counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
7036                                                 selected_contest_delay: msg.to_self_delay,
7037                                                 pubkeys: counterparty_pubkeys,
7038                                         }),
7039                                         funding_outpoint: None,
7040                                         channel_type_features: channel_type.clone()
7041                                 },
7042                                 funding_transaction: None,
7043                                 is_batch_funding: None,
7044
7045                                 counterparty_cur_commitment_point: Some(msg.first_per_commitment_point),
7046                                 counterparty_prev_commitment_point: None,
7047                                 counterparty_node_id,
7048
7049                                 counterparty_shutdown_scriptpubkey,
7050
7051                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7052
7053                                 channel_update_status: ChannelUpdateStatus::Enabled,
7054                                 closing_signed_in_flight: false,
7055
7056                                 announcement_sigs: None,
7057
7058                                 #[cfg(any(test, fuzzing))]
7059                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7060                                 #[cfg(any(test, fuzzing))]
7061                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7062
7063                                 workaround_lnd_bug_4006: None,
7064                                 sent_message_awaiting_response: None,
7065
7066                                 latest_inbound_scid_alias: None,
7067                                 outbound_scid_alias: 0,
7068
7069                                 channel_pending_event_emitted: false,
7070                                 channel_ready_event_emitted: false,
7071
7072                                 #[cfg(any(test, fuzzing))]
7073                                 historical_inbound_htlc_fulfills: HashSet::new(),
7074
7075                                 channel_type,
7076                                 channel_keys_id,
7077
7078                                 blocked_monitor_updates: Vec::new(),
7079                         },
7080                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7081                 };
7082
7083                 Ok(chan)
7084         }
7085
7086         /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7087         /// should be sent back to the counterparty node.
7088         ///
7089         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7090         pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7091                 if self.context.is_outbound() {
7092                         panic!("Tried to send accept_channel for an outbound channel?");
7093                 }
7094                 if !matches!(
7095                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7096                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7097                 ) {
7098                         panic!("Tried to send accept_channel after channel had moved forward");
7099                 }
7100                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7101                         panic!("Tried to send an accept_channel for a channel that has already advanced");
7102                 }
7103
7104                 self.generate_accept_channel_message()
7105         }
7106
7107         /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7108         /// inbound channel. If the intention is to accept an inbound channel, use
7109         /// [`InboundV1Channel::accept_inbound_channel`] instead.
7110         ///
7111         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7112         fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7113                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7114                 let keys = self.context.get_holder_pubkeys();
7115
7116                 msgs::AcceptChannel {
7117                         temporary_channel_id: self.context.channel_id,
7118                         dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7119                         max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7120                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7121                         htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7122                         minimum_depth: self.context.minimum_depth.unwrap(),
7123                         to_self_delay: self.context.get_holder_selected_contest_delay(),
7124                         max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7125                         funding_pubkey: keys.funding_pubkey,
7126                         revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7127                         payment_point: keys.payment_point,
7128                         delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7129                         htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7130                         first_per_commitment_point,
7131                         shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7132                                 Some(script) => script.clone().into_inner(),
7133                                 None => Builder::new().into_script(),
7134                         }),
7135                         channel_type: Some(self.context.channel_type.clone()),
7136                         #[cfg(taproot)]
7137                         next_local_nonce: None,
7138                 }
7139         }
7140
7141         /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7142         /// inbound channel without accepting it.
7143         ///
7144         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7145         #[cfg(test)]
7146         pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7147                 self.generate_accept_channel_message()
7148         }
7149
7150         fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7151                 let funding_script = self.context.get_funding_redeemscript();
7152
7153                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7154                 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7155                 let trusted_tx = initial_commitment_tx.trust();
7156                 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7157                 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7158                 // They sign the holder commitment transaction...
7159                 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7160                         log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7161                         encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7162                         encode::serialize_hex(&funding_script), &self.context.channel_id());
7163                 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7164
7165                 Ok(initial_commitment_tx)
7166         }
7167
7168         pub fn funding_created<L: Deref>(
7169                 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7170         ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7171         where
7172                 L::Target: Logger
7173         {
7174                 if self.context.is_outbound() {
7175                         return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7176                 }
7177                 if !matches!(
7178                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7179                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7180                 ) {
7181                         // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7182                         // remember the channel, so it's safe to just send an error_message here and drop the
7183                         // channel.
7184                         return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7185                 }
7186                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7187                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7188                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7189                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7190                 }
7191
7192                 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7193                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7194                 // This is an externally observable change before we finish all our checks.  In particular
7195                 // check_funding_created_signature may fail.
7196                 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7197
7198                 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7199                         Ok(res) => res,
7200                         Err(ChannelError::Close(e)) => {
7201                                 self.context.channel_transaction_parameters.funding_outpoint = None;
7202                                 return Err((self, ChannelError::Close(e)));
7203                         },
7204                         Err(e) => {
7205                                 // The only error we know how to handle is ChannelError::Close, so we fall over here
7206                                 // to make sure we don't continue with an inconsistent state.
7207                                 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7208                         }
7209                 };
7210
7211                 let holder_commitment_tx = HolderCommitmentTransaction::new(
7212                         initial_commitment_tx,
7213                         msg.signature,
7214                         Vec::new(),
7215                         &self.context.get_holder_pubkeys().funding_pubkey,
7216                         self.context.counterparty_funding_pubkey()
7217                 );
7218
7219                 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7220                         return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7221                 }
7222
7223                 // Now that we're past error-generating stuff, update our local state:
7224
7225                 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7226                 self.context.channel_id = funding_txo.to_channel_id();
7227                 self.context.cur_counterparty_commitment_transaction_number -= 1;
7228                 self.context.cur_holder_commitment_transaction_number -= 1;
7229
7230                 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7231
7232                 let funding_redeemscript = self.context.get_funding_redeemscript();
7233                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7234                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7235                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7236                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7237                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7238                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7239                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
7240                                                           &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7241                                                           &self.context.channel_transaction_parameters,
7242                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
7243                                                           obscure_factor,
7244                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id);
7245                 channel_monitor.provide_initial_counterparty_commitment_tx(
7246                         counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7247                         self.context.cur_counterparty_commitment_transaction_number + 1,
7248                         self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7249                         counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7250                         counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7251
7252                 log_info!(logger, "{} funding_signed for peer for channel {}",
7253                         if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7254
7255                 // Promote the channel to a full-fledged one now that we have updated the state and have a
7256                 // `ChannelMonitor`.
7257                 let mut channel = Channel {
7258                         context: self.context,
7259                 };
7260                 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7261                 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7262
7263                 Ok((channel, funding_signed, channel_monitor))
7264         }
7265 }
7266
7267 const SERIALIZATION_VERSION: u8 = 3;
7268 const MIN_SERIALIZATION_VERSION: u8 = 3;
7269
7270 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7271         (0, FailRelay),
7272         (1, FailMalformed),
7273         (2, Fulfill),
7274 );
7275
7276 impl Writeable for ChannelUpdateStatus {
7277         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7278                 // We only care about writing out the current state as it was announced, ie only either
7279                 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7280                 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
7281                 match self {
7282                         ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7283                         ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7284                         ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7285                         ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7286                 }
7287                 Ok(())
7288         }
7289 }
7290
7291 impl Readable for ChannelUpdateStatus {
7292         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7293                 Ok(match <u8 as Readable>::read(reader)? {
7294                         0 => ChannelUpdateStatus::Enabled,
7295                         1 => ChannelUpdateStatus::Disabled,
7296                         _ => return Err(DecodeError::InvalidValue),
7297                 })
7298         }
7299 }
7300
7301 impl Writeable for AnnouncementSigsState {
7302         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7303                 // We only care about writing out the current state as if we had just disconnected, at
7304                 // which point we always set anything but AnnouncementSigsReceived to NotSent.
7305                 match self {
7306                         AnnouncementSigsState::NotSent => 0u8.write(writer),
7307                         AnnouncementSigsState::MessageSent => 0u8.write(writer),
7308                         AnnouncementSigsState::Committed => 0u8.write(writer),
7309                         AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7310                 }
7311         }
7312 }
7313
7314 impl Readable for AnnouncementSigsState {
7315         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7316                 Ok(match <u8 as Readable>::read(reader)? {
7317                         0 => AnnouncementSigsState::NotSent,
7318                         1 => AnnouncementSigsState::PeerReceived,
7319                         _ => return Err(DecodeError::InvalidValue),
7320                 })
7321         }
7322 }
7323
7324 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7325         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7326                 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
7327                 // called.
7328
7329                 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7330
7331                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7332                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7333                 // the low bytes now and the optional high bytes later.
7334                 let user_id_low = self.context.user_id as u64;
7335                 user_id_low.write(writer)?;
7336
7337                 // Version 1 deserializers expected to read parts of the config object here. Version 2
7338                 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7339                 // `minimum_depth` we simply write dummy values here.
7340                 writer.write_all(&[0; 8])?;
7341
7342                 self.context.channel_id.write(writer)?;
7343                 {
7344                         let mut channel_state = self.context.channel_state;
7345                         if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7346                                 channel_state.set_peer_disconnected();
7347                         }
7348                         channel_state.to_u32().write(writer)?;
7349                 }
7350                 self.context.channel_value_satoshis.write(writer)?;
7351
7352                 self.context.latest_monitor_update_id.write(writer)?;
7353
7354                 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7355                 // deserialized from that format.
7356                 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7357                         Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7358                         None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7359                 }
7360                 self.context.destination_script.write(writer)?;
7361
7362                 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7363                 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7364                 self.context.value_to_self_msat.write(writer)?;
7365
7366                 let mut dropped_inbound_htlcs = 0;
7367                 for htlc in self.context.pending_inbound_htlcs.iter() {
7368                         if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7369                                 dropped_inbound_htlcs += 1;
7370                         }
7371                 }
7372                 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7373                 for htlc in self.context.pending_inbound_htlcs.iter() {
7374                         if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7375                                 continue; // Drop
7376                         }
7377                         htlc.htlc_id.write(writer)?;
7378                         htlc.amount_msat.write(writer)?;
7379                         htlc.cltv_expiry.write(writer)?;
7380                         htlc.payment_hash.write(writer)?;
7381                         match &htlc.state {
7382                                 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7383                                 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7384                                         1u8.write(writer)?;
7385                                         htlc_state.write(writer)?;
7386                                 },
7387                                 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7388                                         2u8.write(writer)?;
7389                                         htlc_state.write(writer)?;
7390                                 },
7391                                 &InboundHTLCState::Committed => {
7392                                         3u8.write(writer)?;
7393                                 },
7394                                 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7395                                         4u8.write(writer)?;
7396                                         removal_reason.write(writer)?;
7397                                 },
7398                         }
7399                 }
7400
7401                 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7402                 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7403                 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7404
7405                 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7406                 for htlc in self.context.pending_outbound_htlcs.iter() {
7407                         htlc.htlc_id.write(writer)?;
7408                         htlc.amount_msat.write(writer)?;
7409                         htlc.cltv_expiry.write(writer)?;
7410                         htlc.payment_hash.write(writer)?;
7411                         htlc.source.write(writer)?;
7412                         match &htlc.state {
7413                                 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
7414                                         0u8.write(writer)?;
7415                                         onion_packet.write(writer)?;
7416                                 },
7417                                 &OutboundHTLCState::Committed => {
7418                                         1u8.write(writer)?;
7419                                 },
7420                                 &OutboundHTLCState::RemoteRemoved(_) => {
7421                                         // Treat this as a Committed because we haven't received the CS - they'll
7422                                         // resend the claim/fail on reconnect as we all (hopefully) the missing CS.
7423                                         1u8.write(writer)?;
7424                                 },
7425                                 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
7426                                         3u8.write(writer)?;
7427                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
7428                                                 preimages.push(preimage);
7429                                         }
7430                                         let reason: Option<&HTLCFailReason> = outcome.into();
7431                                         reason.write(writer)?;
7432                                 }
7433                                 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
7434                                         4u8.write(writer)?;
7435                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
7436                                                 preimages.push(preimage);
7437                                         }
7438                                         let reason: Option<&HTLCFailReason> = outcome.into();
7439                                         reason.write(writer)?;
7440                                 }
7441                         }
7442                         pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
7443                         pending_outbound_blinding_points.push(htlc.blinding_point);
7444                 }
7445
7446                 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
7447                 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7448                 // Vec of (htlc_id, failure_code, sha256_of_onion)
7449                 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
7450                 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
7451                 for update in self.context.holding_cell_htlc_updates.iter() {
7452                         match update {
7453                                 &HTLCUpdateAwaitingACK::AddHTLC {
7454                                         ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
7455                                         blinding_point, skimmed_fee_msat,
7456                                 } => {
7457                                         0u8.write(writer)?;
7458                                         amount_msat.write(writer)?;
7459                                         cltv_expiry.write(writer)?;
7460                                         payment_hash.write(writer)?;
7461                                         source.write(writer)?;
7462                                         onion_routing_packet.write(writer)?;
7463
7464                                         holding_cell_skimmed_fees.push(skimmed_fee_msat);
7465                                         holding_cell_blinding_points.push(blinding_point);
7466                                 },
7467                                 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
7468                                         1u8.write(writer)?;
7469                                         payment_preimage.write(writer)?;
7470                                         htlc_id.write(writer)?;
7471                                 },
7472                                 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
7473                                         2u8.write(writer)?;
7474                                         htlc_id.write(writer)?;
7475                                         err_packet.write(writer)?;
7476                                 }
7477                                 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
7478                                         htlc_id, failure_code, sha256_of_onion
7479                                 } => {
7480                                         // We don't want to break downgrading by adding a new variant, so write a dummy
7481                                         // `::FailHTLC` variant and write the real malformed error as an optional TLV.
7482                                         malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
7483
7484                                         let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
7485                                         2u8.write(writer)?;
7486                                         htlc_id.write(writer)?;
7487                                         dummy_err_packet.write(writer)?;
7488                                 }
7489                         }
7490                 }
7491
7492                 match self.context.resend_order {
7493                         RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
7494                         RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
7495                 }
7496
7497                 self.context.monitor_pending_channel_ready.write(writer)?;
7498                 self.context.monitor_pending_revoke_and_ack.write(writer)?;
7499                 self.context.monitor_pending_commitment_signed.write(writer)?;
7500
7501                 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
7502                 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
7503                         pending_forward.write(writer)?;
7504                         htlc_id.write(writer)?;
7505                 }
7506
7507                 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
7508                 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
7509                         htlc_source.write(writer)?;
7510                         payment_hash.write(writer)?;
7511                         fail_reason.write(writer)?;
7512                 }
7513
7514                 if self.context.is_outbound() {
7515                         self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
7516                 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
7517                         Some(feerate).write(writer)?;
7518                 } else {
7519                         // As for inbound HTLCs, if the update was only announced and never committed in a
7520                         // commitment_signed, drop it.
7521                         None::<u32>.write(writer)?;
7522                 }
7523                 self.context.holding_cell_update_fee.write(writer)?;
7524
7525                 self.context.next_holder_htlc_id.write(writer)?;
7526                 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
7527                 self.context.update_time_counter.write(writer)?;
7528                 self.context.feerate_per_kw.write(writer)?;
7529
7530                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7531                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7532                 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7533                 // consider the stale state on reload.
7534                 0u8.write(writer)?;
7535
7536                 self.context.funding_tx_confirmed_in.write(writer)?;
7537                 self.context.funding_tx_confirmation_height.write(writer)?;
7538                 self.context.short_channel_id.write(writer)?;
7539
7540                 self.context.counterparty_dust_limit_satoshis.write(writer)?;
7541                 self.context.holder_dust_limit_satoshis.write(writer)?;
7542                 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
7543
7544                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7545                 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
7546
7547                 self.context.counterparty_htlc_minimum_msat.write(writer)?;
7548                 self.context.holder_htlc_minimum_msat.write(writer)?;
7549                 self.context.counterparty_max_accepted_htlcs.write(writer)?;
7550
7551                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
7552                 self.context.minimum_depth.unwrap_or(0).write(writer)?;
7553
7554                 match &self.context.counterparty_forwarding_info {
7555                         Some(info) => {
7556                                 1u8.write(writer)?;
7557                                 info.fee_base_msat.write(writer)?;
7558                                 info.fee_proportional_millionths.write(writer)?;
7559                                 info.cltv_expiry_delta.write(writer)?;
7560                         },
7561                         None => 0u8.write(writer)?
7562                 }
7563
7564                 self.context.channel_transaction_parameters.write(writer)?;
7565                 self.context.funding_transaction.write(writer)?;
7566
7567                 self.context.counterparty_cur_commitment_point.write(writer)?;
7568                 self.context.counterparty_prev_commitment_point.write(writer)?;
7569                 self.context.counterparty_node_id.write(writer)?;
7570
7571                 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
7572
7573                 self.context.commitment_secrets.write(writer)?;
7574
7575                 self.context.channel_update_status.write(writer)?;
7576
7577                 #[cfg(any(test, fuzzing))]
7578                 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
7579                 #[cfg(any(test, fuzzing))]
7580                 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
7581                         htlc.write(writer)?;
7582                 }
7583
7584                 // If the channel type is something other than only-static-remote-key, then we need to have
7585                 // older clients fail to deserialize this channel at all. If the type is
7586                 // only-static-remote-key, we simply consider it "default" and don't write the channel type
7587                 // out at all.
7588                 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
7589                         Some(&self.context.channel_type) } else { None };
7590
7591                 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
7592                 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to be set to
7593                 // a different percentage of the channel value then 10%, which older versions of LDK used
7594                 // to set it to before the percentage was made configurable.
7595                 let serialized_holder_selected_reserve =
7596                         if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
7597                         { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
7598
7599                 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
7600                 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
7601                 let serialized_holder_htlc_max_in_flight =
7602                         if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
7603                         { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
7604
7605                 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
7606                 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
7607
7608                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7609                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
7610                 // we write the high bytes as an option here.
7611                 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
7612
7613                 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
7614
7615                 write_tlv_fields!(writer, {
7616                         (0, self.context.announcement_sigs, option),
7617                         // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
7618                         // default value instead of being Option<>al. Thus, to maintain compatibility we write
7619                         // them twice, once with their original default values above, and once as an option
7620                         // here. On the read side, old versions will simply ignore the odd-type entries here,
7621                         // and new versions map the default values to None and allow the TLV entries here to
7622                         // override that.
7623                         (1, self.context.minimum_depth, option),
7624                         (2, chan_type, option),
7625                         (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
7626                         (4, serialized_holder_selected_reserve, option),
7627                         (5, self.context.config, required),
7628                         (6, serialized_holder_htlc_max_in_flight, option),
7629                         (7, self.context.shutdown_scriptpubkey, option),
7630                         (8, self.context.blocked_monitor_updates, optional_vec),
7631                         (9, self.context.target_closing_feerate_sats_per_kw, option),
7632                         (11, self.context.monitor_pending_finalized_fulfills, required_vec),
7633                         (13, self.context.channel_creation_height, required),
7634                         (15, preimages, required_vec),
7635                         (17, self.context.announcement_sigs_state, required),
7636                         (19, self.context.latest_inbound_scid_alias, option),
7637                         (21, self.context.outbound_scid_alias, required),
7638                         (23, channel_ready_event_emitted, option),
7639                         (25, user_id_high_opt, option),
7640                         (27, self.context.channel_keys_id, required),
7641                         (28, holder_max_accepted_htlcs, option),
7642                         (29, self.context.temporary_channel_id, option),
7643                         (31, channel_pending_event_emitted, option),
7644                         (35, pending_outbound_skimmed_fees, optional_vec),
7645                         (37, holding_cell_skimmed_fees, optional_vec),
7646                         (38, self.context.is_batch_funding, option),
7647                         (39, pending_outbound_blinding_points, optional_vec),
7648                         (41, holding_cell_blinding_points, optional_vec),
7649                         (43, malformed_htlcs, optional_vec), // Added in 0.0.119
7650                 });
7651
7652                 Ok(())
7653         }
7654 }
7655
7656 const MAX_ALLOC_SIZE: usize = 64*1024;
7657 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
7658                 where
7659                         ES::Target: EntropySource,
7660                         SP::Target: SignerProvider
7661 {
7662         fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
7663                 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
7664                 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
7665
7666                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7667                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
7668                 // the low bytes now and the high bytes later.
7669                 let user_id_low: u64 = Readable::read(reader)?;
7670
7671                 let mut config = Some(LegacyChannelConfig::default());
7672                 if ver == 1 {
7673                         // Read the old serialization of the ChannelConfig from version 0.0.98.
7674                         config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
7675                         config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
7676                         config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
7677                         config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
7678                 } else {
7679                         // Read the 8 bytes of backwards-compatibility ChannelConfig data.
7680                         let mut _val: u64 = Readable::read(reader)?;
7681                 }
7682
7683                 let channel_id = Readable::read(reader)?;
7684                 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
7685                 let channel_value_satoshis = Readable::read(reader)?;
7686
7687                 let latest_monitor_update_id = Readable::read(reader)?;
7688
7689                 let mut keys_data = None;
7690                 if ver <= 2 {
7691                         // Read the serialize signer bytes. We'll choose to deserialize them or not based on whether
7692                         // the `channel_keys_id` TLV is present below.
7693                         let keys_len: u32 = Readable::read(reader)?;
7694                         keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
7695                         while keys_data.as_ref().unwrap().len() != keys_len as usize {
7696                                 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
7697                                 let mut data = [0; 1024];
7698                                 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
7699                                 reader.read_exact(read_slice)?;
7700                                 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
7701                         }
7702                 }
7703
7704                 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
7705                 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
7706                         Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
7707                         Err(_) => None,
7708                 };
7709                 let destination_script = Readable::read(reader)?;
7710
7711                 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
7712                 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
7713                 let value_to_self_msat = Readable::read(reader)?;
7714
7715                 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
7716
7717                 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7718                 for _ in 0..pending_inbound_htlc_count {
7719                         pending_inbound_htlcs.push(InboundHTLCOutput {
7720                                 htlc_id: Readable::read(reader)?,
7721                                 amount_msat: Readable::read(reader)?,
7722                                 cltv_expiry: Readable::read(reader)?,
7723                                 payment_hash: Readable::read(reader)?,
7724                                 state: match <u8 as Readable>::read(reader)? {
7725                                         1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
7726                                         2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
7727                                         3 => InboundHTLCState::Committed,
7728                                         4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
7729                                         _ => return Err(DecodeError::InvalidValue),
7730                                 },
7731                         });
7732                 }
7733
7734                 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
7735                 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
7736                 for _ in 0..pending_outbound_htlc_count {
7737                         pending_outbound_htlcs.push(OutboundHTLCOutput {
7738                                 htlc_id: Readable::read(reader)?,
7739                                 amount_msat: Readable::read(reader)?,
7740                                 cltv_expiry: Readable::read(reader)?,
7741                                 payment_hash: Readable::read(reader)?,
7742                                 source: Readable::read(reader)?,
7743                                 state: match <u8 as Readable>::read(reader)? {
7744                                         0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
7745                                         1 => OutboundHTLCState::Committed,
7746                                         2 => {
7747                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7748                                                 OutboundHTLCState::RemoteRemoved(option.into())
7749                                         },
7750                                         3 => {
7751                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7752                                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
7753                                         },
7754                                         4 => {
7755                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
7756                                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
7757                                         },
7758                                         _ => return Err(DecodeError::InvalidValue),
7759                                 },
7760                                 skimmed_fee_msat: None,
7761                                 blinding_point: None,
7762                         });
7763                 }
7764
7765                 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
7766                 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
7767                 for _ in 0..holding_cell_htlc_update_count {
7768                         holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
7769                                 0 => HTLCUpdateAwaitingACK::AddHTLC {
7770                                         amount_msat: Readable::read(reader)?,
7771                                         cltv_expiry: Readable::read(reader)?,
7772                                         payment_hash: Readable::read(reader)?,
7773                                         source: Readable::read(reader)?,
7774                                         onion_routing_packet: Readable::read(reader)?,
7775                                         skimmed_fee_msat: None,
7776                                         blinding_point: None,
7777                                 },
7778                                 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
7779                                         payment_preimage: Readable::read(reader)?,
7780                                         htlc_id: Readable::read(reader)?,
7781                                 },
7782                                 2 => HTLCUpdateAwaitingACK::FailHTLC {
7783                                         htlc_id: Readable::read(reader)?,
7784                                         err_packet: Readable::read(reader)?,
7785                                 },
7786                                 _ => return Err(DecodeError::InvalidValue),
7787                         });
7788                 }
7789
7790                 let resend_order = match <u8 as Readable>::read(reader)? {
7791                         0 => RAACommitmentOrder::CommitmentFirst,
7792                         1 => RAACommitmentOrder::RevokeAndACKFirst,
7793                         _ => return Err(DecodeError::InvalidValue),
7794                 };
7795
7796                 let monitor_pending_channel_ready = Readable::read(reader)?;
7797                 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
7798                 let monitor_pending_commitment_signed = Readable::read(reader)?;
7799
7800                 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
7801                 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
7802                 for _ in 0..monitor_pending_forwards_count {
7803                         monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
7804                 }
7805
7806                 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
7807                 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
7808                 for _ in 0..monitor_pending_failures_count {
7809                         monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
7810                 }
7811
7812                 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
7813
7814                 let holding_cell_update_fee = Readable::read(reader)?;
7815
7816                 let next_holder_htlc_id = Readable::read(reader)?;
7817                 let next_counterparty_htlc_id = Readable::read(reader)?;
7818                 let update_time_counter = Readable::read(reader)?;
7819                 let feerate_per_kw = Readable::read(reader)?;
7820
7821                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
7822                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
7823                 // `last_send_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
7824                 // consider the stale state on reload.
7825                 match <u8 as Readable>::read(reader)? {
7826                         0 => {},
7827                         1 => {
7828                                 let _: u32 = Readable::read(reader)?;
7829                                 let _: u64 = Readable::read(reader)?;
7830                                 let _: Signature = Readable::read(reader)?;
7831                         },
7832                         _ => return Err(DecodeError::InvalidValue),
7833                 }
7834
7835                 let funding_tx_confirmed_in = Readable::read(reader)?;
7836                 let funding_tx_confirmation_height = Readable::read(reader)?;
7837                 let short_channel_id = Readable::read(reader)?;
7838
7839                 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
7840                 let holder_dust_limit_satoshis = Readable::read(reader)?;
7841                 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
7842                 let mut counterparty_selected_channel_reserve_satoshis = None;
7843                 if ver == 1 {
7844                         // Read the old serialization from version 0.0.98.
7845                         counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
7846                 } else {
7847                         // Read the 8 bytes of backwards-compatibility data.
7848                         let _dummy: u64 = Readable::read(reader)?;
7849                 }
7850                 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
7851                 let holder_htlc_minimum_msat = Readable::read(reader)?;
7852                 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
7853
7854                 let mut minimum_depth = None;
7855                 if ver == 1 {
7856                         // Read the old serialization from version 0.0.98.
7857                         minimum_depth = Some(Readable::read(reader)?);
7858                 } else {
7859                         // Read the 4 bytes of backwards-compatibility data.
7860                         let _dummy: u32 = Readable::read(reader)?;
7861                 }
7862
7863                 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
7864                         0 => None,
7865                         1 => Some(CounterpartyForwardingInfo {
7866                                 fee_base_msat: Readable::read(reader)?,
7867                                 fee_proportional_millionths: Readable::read(reader)?,
7868                                 cltv_expiry_delta: Readable::read(reader)?,
7869                         }),
7870                         _ => return Err(DecodeError::InvalidValue),
7871                 };
7872
7873                 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
7874                 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
7875
7876                 let counterparty_cur_commitment_point = Readable::read(reader)?;
7877
7878                 let counterparty_prev_commitment_point = Readable::read(reader)?;
7879                 let counterparty_node_id = Readable::read(reader)?;
7880
7881                 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
7882                 let commitment_secrets = Readable::read(reader)?;
7883
7884                 let channel_update_status = Readable::read(reader)?;
7885
7886                 #[cfg(any(test, fuzzing))]
7887                 let mut historical_inbound_htlc_fulfills = HashSet::new();
7888                 #[cfg(any(test, fuzzing))]
7889                 {
7890                         let htlc_fulfills_len: u64 = Readable::read(reader)?;
7891                         for _ in 0..htlc_fulfills_len {
7892                                 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
7893                         }
7894                 }
7895
7896                 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
7897                         Some((feerate, if channel_parameters.is_outbound_from_holder {
7898                                 FeeUpdateState::Outbound
7899                         } else {
7900                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
7901                         }))
7902                 } else {
7903                         None
7904                 };
7905
7906                 let mut announcement_sigs = None;
7907                 let mut target_closing_feerate_sats_per_kw = None;
7908                 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
7909                 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
7910                 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
7911                 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
7912                 // only, so we default to that if none was written.
7913                 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
7914                 let mut channel_creation_height = Some(serialized_height);
7915                 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
7916
7917                 // If we read an old Channel, for simplicity we just treat it as "we never sent an
7918                 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
7919                 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
7920                 let mut latest_inbound_scid_alias = None;
7921                 let mut outbound_scid_alias = None;
7922                 let mut channel_pending_event_emitted = None;
7923                 let mut channel_ready_event_emitted = None;
7924
7925                 let mut user_id_high_opt: Option<u64> = None;
7926                 let mut channel_keys_id: Option<[u8; 32]> = None;
7927                 let mut temporary_channel_id: Option<ChannelId> = None;
7928                 let mut holder_max_accepted_htlcs: Option<u16> = None;
7929
7930                 let mut blocked_monitor_updates = Some(Vec::new());
7931
7932                 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7933                 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
7934
7935                 let mut is_batch_funding: Option<()> = None;
7936
7937                 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7938                 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
7939
7940                 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
7941
7942                 read_tlv_fields!(reader, {
7943                         (0, announcement_sigs, option),
7944                         (1, minimum_depth, option),
7945                         (2, channel_type, option),
7946                         (3, counterparty_selected_channel_reserve_satoshis, option),
7947                         (4, holder_selected_channel_reserve_satoshis, option),
7948                         (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
7949                         (6, holder_max_htlc_value_in_flight_msat, option),
7950                         (7, shutdown_scriptpubkey, option),
7951                         (8, blocked_monitor_updates, optional_vec),
7952                         (9, target_closing_feerate_sats_per_kw, option),
7953                         (11, monitor_pending_finalized_fulfills, optional_vec),
7954                         (13, channel_creation_height, option),
7955                         (15, preimages_opt, optional_vec),
7956                         (17, announcement_sigs_state, option),
7957                         (19, latest_inbound_scid_alias, option),
7958                         (21, outbound_scid_alias, option),
7959                         (23, channel_ready_event_emitted, option),
7960                         (25, user_id_high_opt, option),
7961                         (27, channel_keys_id, option),
7962                         (28, holder_max_accepted_htlcs, option),
7963                         (29, temporary_channel_id, option),
7964                         (31, channel_pending_event_emitted, option),
7965                         (35, pending_outbound_skimmed_fees_opt, optional_vec),
7966                         (37, holding_cell_skimmed_fees_opt, optional_vec),
7967                         (38, is_batch_funding, option),
7968                         (39, pending_outbound_blinding_points_opt, optional_vec),
7969                         (41, holding_cell_blinding_points_opt, optional_vec),
7970                         (43, malformed_htlcs, optional_vec), // Added in 0.0.119
7971                 });
7972
7973                 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
7974                         let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7975                         // If we've gotten to the funding stage of the channel, populate the signer with its
7976                         // required channel parameters.
7977                         if channel_state >= ChannelState::FundingNegotiated {
7978                                 holder_signer.provide_channel_parameters(&channel_parameters);
7979                         }
7980                         (channel_keys_id, holder_signer)
7981                 } else {
7982                         // `keys_data` can be `None` if we had corrupted data.
7983                         let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
7984                         let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
7985                         (holder_signer.channel_keys_id(), holder_signer)
7986                 };
7987
7988                 if let Some(preimages) = preimages_opt {
7989                         let mut iter = preimages.into_iter();
7990                         for htlc in pending_outbound_htlcs.iter_mut() {
7991                                 match &htlc.state {
7992                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
7993                                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7994                                         }
7995                                         OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
7996                                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
7997                                         }
7998                                         _ => {}
7999                                 }
8000                         }
8001                         // We expect all preimages to be consumed above
8002                         if iter.next().is_some() {
8003                                 return Err(DecodeError::InvalidValue);
8004                         }
8005                 }
8006
8007                 let chan_features = channel_type.as_ref().unwrap();
8008                 if !chan_features.is_subset(our_supported_features) {
8009                         // If the channel was written by a new version and negotiated with features we don't
8010                         // understand yet, refuse to read it.
8011                         return Err(DecodeError::UnknownRequiredFeature);
8012                 }
8013
8014                 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8015                 // To account for that, we're proactively setting/overriding the field here.
8016                 channel_parameters.channel_type_features = chan_features.clone();
8017
8018                 let mut secp_ctx = Secp256k1::new();
8019                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8020
8021                 // `user_id` used to be a single u64 value. In order to remain backwards
8022                 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8023                 // separate u64 values.
8024                 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
8025
8026                 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8027
8028                 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8029                         let mut iter = skimmed_fees.into_iter();
8030                         for htlc in pending_outbound_htlcs.iter_mut() {
8031                                 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8032                         }
8033                         // We expect all skimmed fees to be consumed above
8034                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8035                 }
8036                 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8037                         let mut iter = skimmed_fees.into_iter();
8038                         for htlc in holding_cell_htlc_updates.iter_mut() {
8039                                 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8040                                         *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8041                                 }
8042                         }
8043                         // We expect all skimmed fees to be consumed above
8044                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8045                 }
8046                 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8047                         let mut iter = blinding_pts.into_iter();
8048                         for htlc in pending_outbound_htlcs.iter_mut() {
8049                                 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8050                         }
8051                         // We expect all blinding points to be consumed above
8052                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8053                 }
8054                 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8055                         let mut iter = blinding_pts.into_iter();
8056                         for htlc in holding_cell_htlc_updates.iter_mut() {
8057                                 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8058                                         *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8059                                 }
8060                         }
8061                         // We expect all blinding points to be consumed above
8062                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8063                 }
8064
8065                 if let Some(malformed_htlcs) = malformed_htlcs {
8066                         for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8067                                 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8068                                         if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8069                                                 let matches = *htlc_id == malformed_htlc_id;
8070                                                 if matches { debug_assert!(err_packet.data.is_empty()) }
8071                                                 matches
8072                                         } else { false }
8073                                 }).ok_or(DecodeError::InvalidValue)?;
8074                                 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8075                                         htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8076                                 };
8077                                 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
8078                         }
8079                 }
8080
8081                 Ok(Channel {
8082                         context: ChannelContext {
8083                                 user_id,
8084
8085                                 config: config.unwrap(),
8086
8087                                 prev_config: None,
8088
8089                                 // Note that we don't care about serializing handshake limits as we only ever serialize
8090                                 // channel data after the handshake has completed.
8091                                 inbound_handshake_limits_override: None,
8092
8093                                 channel_id,
8094                                 temporary_channel_id,
8095                                 channel_state,
8096                                 announcement_sigs_state: announcement_sigs_state.unwrap(),
8097                                 secp_ctx,
8098                                 channel_value_satoshis,
8099
8100                                 latest_monitor_update_id,
8101
8102                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8103                                 shutdown_scriptpubkey,
8104                                 destination_script,
8105
8106                                 cur_holder_commitment_transaction_number,
8107                                 cur_counterparty_commitment_transaction_number,
8108                                 value_to_self_msat,
8109
8110                                 holder_max_accepted_htlcs,
8111                                 pending_inbound_htlcs,
8112                                 pending_outbound_htlcs,
8113                                 holding_cell_htlc_updates,
8114
8115                                 resend_order,
8116
8117                                 monitor_pending_channel_ready,
8118                                 monitor_pending_revoke_and_ack,
8119                                 monitor_pending_commitment_signed,
8120                                 monitor_pending_forwards,
8121                                 monitor_pending_failures,
8122                                 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8123
8124                                 signer_pending_commitment_update: false,
8125                                 signer_pending_funding: false,
8126
8127                                 pending_update_fee,
8128                                 holding_cell_update_fee,
8129                                 next_holder_htlc_id,
8130                                 next_counterparty_htlc_id,
8131                                 update_time_counter,
8132                                 feerate_per_kw,
8133
8134                                 #[cfg(debug_assertions)]
8135                                 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8136                                 #[cfg(debug_assertions)]
8137                                 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8138
8139                                 last_sent_closing_fee: None,
8140                                 pending_counterparty_closing_signed: None,
8141                                 expecting_peer_commitment_signed: false,
8142                                 closing_fee_limits: None,
8143                                 target_closing_feerate_sats_per_kw,
8144
8145                                 funding_tx_confirmed_in,
8146                                 funding_tx_confirmation_height,
8147                                 short_channel_id,
8148                                 channel_creation_height: channel_creation_height.unwrap(),
8149
8150                                 counterparty_dust_limit_satoshis,
8151                                 holder_dust_limit_satoshis,
8152                                 counterparty_max_htlc_value_in_flight_msat,
8153                                 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8154                                 counterparty_selected_channel_reserve_satoshis,
8155                                 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8156                                 counterparty_htlc_minimum_msat,
8157                                 holder_htlc_minimum_msat,
8158                                 counterparty_max_accepted_htlcs,
8159                                 minimum_depth,
8160
8161                                 counterparty_forwarding_info,
8162
8163                                 channel_transaction_parameters: channel_parameters,
8164                                 funding_transaction,
8165                                 is_batch_funding,
8166
8167                                 counterparty_cur_commitment_point,
8168                                 counterparty_prev_commitment_point,
8169                                 counterparty_node_id,
8170
8171                                 counterparty_shutdown_scriptpubkey,
8172
8173                                 commitment_secrets,
8174
8175                                 channel_update_status,
8176                                 closing_signed_in_flight: false,
8177
8178                                 announcement_sigs,
8179
8180                                 #[cfg(any(test, fuzzing))]
8181                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8182                                 #[cfg(any(test, fuzzing))]
8183                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8184
8185                                 workaround_lnd_bug_4006: None,
8186                                 sent_message_awaiting_response: None,
8187
8188                                 latest_inbound_scid_alias,
8189                                 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if its missing
8190                                 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8191
8192                                 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8193                                 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8194
8195                                 #[cfg(any(test, fuzzing))]
8196                                 historical_inbound_htlc_fulfills,
8197
8198                                 channel_type: channel_type.unwrap(),
8199                                 channel_keys_id,
8200
8201                                 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8202                         }
8203                 })
8204         }
8205 }
8206
8207 #[cfg(test)]
8208 mod tests {
8209         use std::cmp;
8210         use bitcoin::blockdata::constants::ChainHash;
8211         use bitcoin::blockdata::script::{ScriptBuf, Builder};
8212         use bitcoin::blockdata::transaction::{Transaction, TxOut};
8213         use bitcoin::blockdata::opcodes;
8214         use bitcoin::network::constants::Network;
8215         use crate::ln::{PaymentHash, PaymentPreimage};
8216         use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8217         use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8218         use crate::ln::channel::InitFeatures;
8219         use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8220         use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8221         use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8222         use crate::ln::msgs;
8223         use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8224         use crate::ln::script::ShutdownScript;
8225         use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8226         use crate::chain::BestBlock;
8227         use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8228         use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8229         use crate::chain::transaction::OutPoint;
8230         use crate::routing::router::{Path, RouteHop};
8231         use crate::util::config::UserConfig;
8232         use crate::util::errors::APIError;
8233         use crate::util::ser::{ReadableArgs, Writeable};
8234         use crate::util::test_utils;
8235         use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8236         use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8237         use bitcoin::secp256k1::ffi::Signature as FFISignature;
8238         use bitcoin::secp256k1::{SecretKey,PublicKey};
8239         use bitcoin::hashes::sha256::Hash as Sha256;
8240         use bitcoin::hashes::Hash;
8241         use bitcoin::hashes::hex::FromHex;
8242         use bitcoin::hash_types::WPubkeyHash;
8243         use bitcoin::blockdata::locktime::absolute::LockTime;
8244         use bitcoin::address::{WitnessProgram, WitnessVersion};
8245         use crate::prelude::*;
8246
8247         struct TestFeeEstimator {
8248                 fee_est: u32
8249         }
8250         impl FeeEstimator for TestFeeEstimator {
8251                 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8252                         self.fee_est
8253                 }
8254         }
8255
8256         #[test]
8257         fn test_max_funding_satoshis_no_wumbo() {
8258                 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8259                 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8260                         "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8261         }
8262
8263         struct Keys {
8264                 signer: InMemorySigner,
8265         }
8266
8267         impl EntropySource for Keys {
8268                 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8269         }
8270
8271         impl SignerProvider for Keys {
8272                 type EcdsaSigner = InMemorySigner;
8273                 #[cfg(taproot)]
8274                 type TaprootSigner = InMemorySigner;
8275
8276                 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8277                         self.signer.channel_keys_id()
8278                 }
8279
8280                 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8281                         self.signer.clone()
8282                 }
8283
8284                 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8285
8286                 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8287                         let secp_ctx = Secp256k1::signing_only();
8288                         let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8289                         let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8290                         Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8291                 }
8292
8293                 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8294                         let secp_ctx = Secp256k1::signing_only();
8295                         let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8296                         Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8297                 }
8298         }
8299
8300         #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8301         fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8302                 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8303         }
8304
8305         #[test]
8306         fn upfront_shutdown_script_incompatibility() {
8307                 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8308                 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8309                         &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8310                 ).unwrap();
8311
8312                 let seed = [42; 32];
8313                 let network = Network::Testnet;
8314                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8315                 keys_provider.expect(OnGetShutdownScriptpubkey {
8316                         returns: non_v0_segwit_shutdown_script.clone(),
8317                 });
8318
8319                 let secp_ctx = Secp256k1::new();
8320                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8321                 let config = UserConfig::default();
8322                 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8323                         Err(APIError::IncompatibleShutdownScript { script }) => {
8324                                 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8325                         },
8326                         Err(e) => panic!("Unexpected error: {:?}", e),
8327                         Ok(_) => panic!("Expected error"),
8328                 }
8329         }
8330
8331         // Check that, during channel creation, we use the same feerate in the open channel message
8332         // as we do in the Channel object creation itself.
8333         #[test]
8334         fn test_open_channel_msg_fee() {
8335                 let original_fee = 253;
8336                 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8337                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8338                 let secp_ctx = Secp256k1::new();
8339                 let seed = [42; 32];
8340                 let network = Network::Testnet;
8341                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8342
8343                 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8344                 let config = UserConfig::default();
8345                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8346
8347                 // Now change the fee so we can check that the fee in the open_channel message is the
8348                 // same as the old fee.
8349                 fee_est.fee_est = 500;
8350                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8351                 assert_eq!(open_channel_msg.feerate_per_kw, original_fee);
8352         }
8353
8354         #[test]
8355         fn test_holder_vs_counterparty_dust_limit() {
8356                 // Test that when calculating the local and remote commitment transaction fees, the correct
8357                 // dust limits are used.
8358                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8359                 let secp_ctx = Secp256k1::new();
8360                 let seed = [42; 32];
8361                 let network = Network::Testnet;
8362                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8363                 let logger = test_utils::TestLogger::new();
8364                 let best_block = BestBlock::from_network(network);
8365
8366                 // Go through the flow of opening a channel between two nodes, making sure
8367                 // they have different dust limits.
8368
8369                 // Create Node A's channel pointing to Node B's pubkey
8370                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8371                 let config = UserConfig::default();
8372                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8373
8374                 // Create Node B's channel by receiving Node A's open_channel message
8375                 // Make sure A's dust limit is as we expect.
8376                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8377                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8378                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8379
8380                 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8381                 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8382                 accept_channel_msg.dust_limit_satoshis = 546;
8383                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8384                 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8385
8386                 // Node A --> Node B: funding created
8387                 let output_script = node_a_chan.context.get_funding_redeemscript();
8388                 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8389                         value: 10000000, script_pubkey: output_script.clone(),
8390                 }]};
8391                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8392                 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8393                 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8394
8395                 // Node B --> Node A: funding signed
8396                 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8397                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8398
8399                 // Put some inbound and outbound HTLCs in A's channel.
8400                 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
8401                 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
8402                         htlc_id: 0,
8403                         amount_msat: htlc_amount_msat,
8404                         payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
8405                         cltv_expiry: 300000000,
8406                         state: InboundHTLCState::Committed,
8407                 });
8408
8409                 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
8410                         htlc_id: 1,
8411                         amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
8412                         payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
8413                         cltv_expiry: 200000000,
8414                         state: OutboundHTLCState::Committed,
8415                         source: HTLCSource::OutboundRoute {
8416                                 path: Path { hops: Vec::new(), blinded_tail: None },
8417                                 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8418                                 first_hop_htlc_msat: 548,
8419                                 payment_id: PaymentId([42; 32]),
8420                         },
8421                         skimmed_fee_msat: None,
8422                         blinding_point: None,
8423                 });
8424
8425                 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
8426                 // the dust limit check.
8427                 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8428                 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8429                 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
8430                 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
8431
8432                 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
8433                 // of the HTLCs are seen to be above the dust limit.
8434                 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8435                 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
8436                 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
8437                 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8438                 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
8439         }
8440
8441         #[test]
8442         fn test_timeout_vs_success_htlc_dust_limit() {
8443                 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
8444                 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
8445                 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
8446                 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
8447                 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
8448                 let secp_ctx = Secp256k1::new();
8449                 let seed = [42; 32];
8450                 let network = Network::Testnet;
8451                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8452
8453                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8454                 let config = UserConfig::default();
8455                 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8456
8457                 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
8458                 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
8459
8460                 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
8461                 // counted as dust when it shouldn't be.
8462                 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
8463                 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8464                 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8465                 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8466
8467                 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8468                 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
8469                 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8470                 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
8471                 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8472
8473                 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
8474
8475                 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
8476                 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
8477                 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
8478                 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8479                 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
8480
8481                 // If swapped: this HTLC would be counted as dust when it shouldn't be.
8482                 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
8483                 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
8484                 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
8485                 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
8486         }
8487
8488         #[test]
8489         fn channel_reestablish_no_updates() {
8490                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8491                 let logger = test_utils::TestLogger::new();
8492                 let secp_ctx = Secp256k1::new();
8493                 let seed = [42; 32];
8494                 let network = Network::Testnet;
8495                 let best_block = BestBlock::from_network(network);
8496                 let chain_hash = ChainHash::using_genesis_block(network);
8497                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8498
8499                 // Go through the flow of opening a channel between two nodes.
8500
8501                 // Create Node A's channel pointing to Node B's pubkey
8502                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8503                 let config = UserConfig::default();
8504                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8505
8506                 // Create Node B's channel by receiving Node A's open_channel message
8507                 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
8508                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8509                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8510
8511                 // Node B --> Node A: accept channel
8512                 let accept_channel_msg = node_b_chan.accept_inbound_channel();
8513                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8514
8515                 // Node A --> Node B: funding created
8516                 let output_script = node_a_chan.context.get_funding_redeemscript();
8517                 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8518                         value: 10000000, script_pubkey: output_script.clone(),
8519                 }]};
8520                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8521                 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8522                 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8523
8524                 // Node B --> Node A: funding signed
8525                 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8526                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8527
8528                 // Now disconnect the two nodes and check that the commitment point in
8529                 // Node B's channel_reestablish message is sane.
8530                 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8531                 let msg = node_b_chan.get_channel_reestablish(&&logger);
8532                 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8533                 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8534                 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8535
8536                 // Check that the commitment point in Node A's channel_reestablish message
8537                 // is sane.
8538                 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
8539                 let msg = node_a_chan.get_channel_reestablish(&&logger);
8540                 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
8541                 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
8542                 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
8543         }
8544
8545         #[test]
8546         fn test_configured_holder_max_htlc_value_in_flight() {
8547                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8548                 let logger = test_utils::TestLogger::new();
8549                 let secp_ctx = Secp256k1::new();
8550                 let seed = [42; 32];
8551                 let network = Network::Testnet;
8552                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8553                 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8554                 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8555
8556                 let mut config_2_percent = UserConfig::default();
8557                 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
8558                 let mut config_99_percent = UserConfig::default();
8559                 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
8560                 let mut config_0_percent = UserConfig::default();
8561                 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
8562                 let mut config_101_percent = UserConfig::default();
8563                 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
8564
8565                 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
8566                 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8567                 // which is set to the lower bound + 1 (2%) of the `channel_value`.
8568                 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
8569                 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
8570                 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
8571
8572                 // Test with the upper bound - 1 of valid values (99%).
8573                 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
8574                 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
8575                 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
8576
8577                 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
8578
8579                 // Test that `InboundV1Channel::new` creates a channel with the correct value for
8580                 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
8581                 // which is set to the lower bound - 1 (2%) of the `channel_value`.
8582                 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8583                 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
8584                 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
8585
8586                 // Test with the upper bound - 1 of valid values (99%).
8587                 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8588                 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
8589                 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
8590
8591                 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8592                 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8593                 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
8594                 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
8595                 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
8596
8597                 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
8598                 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
8599                 // than 100.
8600                 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
8601                 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
8602                 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
8603
8604                 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
8605                 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
8606                 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8607                 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
8608                 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
8609
8610                 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
8611                 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
8612                 // than 100.
8613                 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
8614                 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
8615                 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
8616         }
8617
8618         #[test]
8619         fn test_configured_holder_selected_channel_reserve_satoshis() {
8620
8621                 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
8622                 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
8623                 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
8624
8625                 // Test with valid but unreasonably high channel reserves
8626                 // Requesting and accepting parties have requested for 49%-49% and 60%-30% channel reserve
8627                 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
8628                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
8629
8630                 // Test with calculated channel reserve less than lower bound
8631                 // i.e `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
8632                 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
8633
8634                 // Test with invalid channel reserves since sum of both is greater than or equal
8635                 // to channel value
8636                 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
8637                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
8638         }
8639
8640         fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
8641                 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
8642                 let logger = test_utils::TestLogger::new();
8643                 let secp_ctx = Secp256k1::new();
8644                 let seed = [42; 32];
8645                 let network = Network::Testnet;
8646                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8647                 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8648                 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8649
8650
8651                 let mut outbound_node_config = UserConfig::default();
8652                 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8653                 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
8654
8655                 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
8656                 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
8657
8658                 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
8659                 let mut inbound_node_config = UserConfig::default();
8660                 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
8661
8662                 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
8663                         let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
8664
8665                         let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
8666
8667                         assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
8668                         assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
8669                 } else {
8670                         // Channel Negotiations failed
8671                         let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
8672                         assert!(result.is_err());
8673                 }
8674         }
8675
8676         #[test]
8677         fn channel_update() {
8678                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8679                 let logger = test_utils::TestLogger::new();
8680                 let secp_ctx = Secp256k1::new();
8681                 let seed = [42; 32];
8682                 let network = Network::Testnet;
8683                 let best_block = BestBlock::from_network(network);
8684                 let chain_hash = ChainHash::using_genesis_block(network);
8685                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8686
8687                 // Create Node A's channel pointing to Node B's pubkey
8688                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8689                 let config = UserConfig::default();
8690                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8691
8692                 // Create Node B's channel by receiving Node A's open_channel message
8693                 // Make sure A's dust limit is as we expect.
8694                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8695                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8696                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8697
8698                 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8699                 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8700                 accept_channel_msg.dust_limit_satoshis = 546;
8701                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8702                 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8703
8704                 // Node A --> Node B: funding created
8705                 let output_script = node_a_chan.context.get_funding_redeemscript();
8706                 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8707                         value: 10000000, script_pubkey: output_script.clone(),
8708                 }]};
8709                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8710                 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
8711                 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
8712
8713                 // Node B --> Node A: funding signed
8714                 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
8715                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
8716
8717                 // Make sure that receiving a channel update will update the Channel as expected.
8718                 let update = ChannelUpdate {
8719                         contents: UnsignedChannelUpdate {
8720                                 chain_hash,
8721                                 short_channel_id: 0,
8722                                 timestamp: 0,
8723                                 flags: 0,
8724                                 cltv_expiry_delta: 100,
8725                                 htlc_minimum_msat: 5,
8726                                 htlc_maximum_msat: MAX_VALUE_MSAT,
8727                                 fee_base_msat: 110,
8728                                 fee_proportional_millionths: 11,
8729                                 excess_data: Vec::new(),
8730                         },
8731                         signature: Signature::from(unsafe { FFISignature::new() })
8732                 };
8733                 assert!(node_a_chan.channel_update(&update).unwrap());
8734
8735                 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
8736                 // change our official htlc_minimum_msat.
8737                 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
8738                 match node_a_chan.context.counterparty_forwarding_info() {
8739                         Some(info) => {
8740                                 assert_eq!(info.cltv_expiry_delta, 100);
8741                                 assert_eq!(info.fee_base_msat, 110);
8742                                 assert_eq!(info.fee_proportional_millionths, 11);
8743                         },
8744                         None => panic!("expected counterparty forwarding info to be Some")
8745                 }
8746
8747                 assert!(!node_a_chan.channel_update(&update).unwrap());
8748         }
8749
8750         #[test]
8751         fn blinding_point_skimmed_fee_ser() {
8752                 // Ensure that channel blinding points and skimmed fees are (de)serialized properly.
8753                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8754                 let secp_ctx = Secp256k1::new();
8755                 let seed = [42; 32];
8756                 let network = Network::Testnet;
8757                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8758
8759                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8760                 let config = UserConfig::default();
8761                 let features = channelmanager::provided_init_features(&config);
8762                 let outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8763                 let mut chan = Channel { context: outbound_chan.context };
8764
8765                 let dummy_htlc_source = HTLCSource::OutboundRoute {
8766                         path: Path {
8767                                 hops: vec![RouteHop {
8768                                         pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
8769                                         node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
8770                                         cltv_expiry_delta: 0, maybe_announced_channel: false,
8771                                 }],
8772                                 blinded_tail: None
8773                         },
8774                         session_priv: test_utils::privkey(42),
8775                         first_hop_htlc_msat: 0,
8776                         payment_id: PaymentId([42; 32]),
8777                 };
8778                 let dummy_outbound_output = OutboundHTLCOutput {
8779                         htlc_id: 0,
8780                         amount_msat: 0,
8781                         payment_hash: PaymentHash([43; 32]),
8782                         cltv_expiry: 0,
8783                         state: OutboundHTLCState::Committed,
8784                         source: dummy_htlc_source.clone(),
8785                         skimmed_fee_msat: None,
8786                         blinding_point: None,
8787                 };
8788                 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
8789                 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
8790                         if idx % 2 == 0 {
8791                                 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
8792                         }
8793                         if idx % 3 == 0 {
8794                                 htlc.skimmed_fee_msat = Some(1);
8795                         }
8796                 }
8797                 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
8798
8799                 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
8800                         amount_msat: 0,
8801                         cltv_expiry: 0,
8802                         payment_hash: PaymentHash([43; 32]),
8803                         source: dummy_htlc_source.clone(),
8804                         onion_routing_packet: msgs::OnionPacket {
8805                                 version: 0,
8806                                 public_key: Ok(test_utils::pubkey(1)),
8807                                 hop_data: [0; 20*65],
8808                                 hmac: [0; 32]
8809                         },
8810                         skimmed_fee_msat: None,
8811                         blinding_point: None,
8812                 };
8813                 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
8814                         payment_preimage: PaymentPreimage([42; 32]),
8815                         htlc_id: 0,
8816                 };
8817                 let mut holding_cell_htlc_updates = Vec::with_capacity(10);
8818                 for i in 0..10 {
8819                         if i % 3 == 0 {
8820                                 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
8821                         } else if i % 3 == 1 {
8822                                 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
8823                         } else {
8824                                 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
8825                                 if let HTLCUpdateAwaitingACK::AddHTLC {
8826                                         ref mut blinding_point, ref mut skimmed_fee_msat, ..
8827                                 } = &mut dummy_add {
8828                                         *blinding_point = Some(test_utils::pubkey(42 + i));
8829                                         *skimmed_fee_msat = Some(42);
8830                                 } else { panic!() }
8831                                 holding_cell_htlc_updates.push(dummy_add);
8832                         }
8833                 }
8834                 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
8835
8836                 // Encode and decode the channel and ensure that the HTLCs within are the same.
8837                 let encoded_chan = chan.encode();
8838                 let mut s = crate::io::Cursor::new(&encoded_chan);
8839                 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
8840                 let features = channelmanager::provided_channel_type_features(&config);
8841                 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
8842                 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
8843                 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
8844         }
8845
8846         #[cfg(feature = "_test_vectors")]
8847         #[test]
8848         fn outbound_commitment_test() {
8849                 use bitcoin::sighash;
8850                 use bitcoin::consensus::encode::serialize;
8851                 use bitcoin::sighash::EcdsaSighashType;
8852                 use bitcoin::hashes::hex::FromHex;
8853                 use bitcoin::hash_types::Txid;
8854                 use bitcoin::secp256k1::Message;
8855                 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
8856                 use crate::ln::PaymentPreimage;
8857                 use crate::ln::channel::{HTLCOutputInCommitment ,TxCreationKeys};
8858                 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
8859                 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
8860                 use crate::util::logger::Logger;
8861                 use crate::sync::Arc;
8862                 use core::str::FromStr;
8863                 use hex::DisplayHex;
8864
8865                 // Test vectors from BOLT 3 Appendices C and F (anchors):
8866                 let feeest = TestFeeEstimator{fee_est: 15000};
8867                 let logger : Arc<Logger> = Arc::new(test_utils::TestLogger::new());
8868                 let secp_ctx = Secp256k1::new();
8869
8870                 let mut signer = InMemorySigner::new(
8871                         &secp_ctx,
8872                         SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
8873                         SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
8874                         SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8875                         SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
8876                         SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
8877
8878                         // These aren't set in the test vectors:
8879                         [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
8880                         10_000_000,
8881                         [0; 32],
8882                         [0; 32],
8883                 );
8884
8885                 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
8886                                 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
8887                 let keys_provider = Keys { signer: signer.clone() };
8888
8889                 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8890                 let mut config = UserConfig::default();
8891                 config.channel_handshake_config.announced_channel = false;
8892                 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
8893                 chan.context.holder_dust_limit_satoshis = 546;
8894                 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in in accept_channel
8895
8896                 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
8897
8898                 let counterparty_pubkeys = ChannelPublicKeys {
8899                         funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
8900                         revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
8901                         payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
8902                         delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
8903                         htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
8904                 };
8905                 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
8906                         CounterpartyChannelTransactionParameters {
8907                                 pubkeys: counterparty_pubkeys.clone(),
8908                                 selected_contest_delay: 144
8909                         });
8910                 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
8911                 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
8912
8913                 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
8914                            <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8915
8916                 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
8917                            <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
8918
8919                 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
8920                            <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
8921
8922                 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
8923                 // derived from a commitment_seed, so instead we copy it here and call
8924                 // build_commitment_transaction.
8925                 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
8926                 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
8927                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
8928                 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
8929                 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
8930
8931                 macro_rules! test_commitment {
8932                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8933                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
8934                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
8935                         };
8936                 }
8937
8938                 macro_rules! test_commitment_with_anchors {
8939                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
8940                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
8941                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
8942                         };
8943                 }
8944
8945                 macro_rules! test_commitment_common {
8946                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
8947                                 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
8948                         } ) => { {
8949                                 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
8950                                         let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
8951
8952                                         let htlcs = commitment_stats.htlcs_included.drain(..)
8953                                                 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
8954                                                 .collect();
8955                                         (commitment_stats.tx, htlcs)
8956                                 };
8957                                 let trusted_tx = commitment_tx.trust();
8958                                 let unsigned_tx = trusted_tx.built_transaction();
8959                                 let redeemscript = chan.context.get_funding_redeemscript();
8960                                 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
8961                                 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
8962                                 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
8963                                 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
8964
8965                                 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
8966                                 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
8967                                 let mut counterparty_htlc_sigs = Vec::new();
8968                                 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
8969                                 $({
8970                                         let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8971                                         per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
8972                                         counterparty_htlc_sigs.push(remote_signature);
8973                                 })*
8974                                 assert_eq!(htlcs.len(), per_htlc.len());
8975
8976                                 let holder_commitment_tx = HolderCommitmentTransaction::new(
8977                                         commitment_tx.clone(),
8978                                         counterparty_signature,
8979                                         counterparty_htlc_sigs,
8980                                         &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
8981                                         chan.context.counterparty_funding_pubkey()
8982                                 );
8983                                 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
8984                                 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
8985
8986                                 let funding_redeemscript = chan.context.get_funding_redeemscript();
8987                                 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
8988                                 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
8989
8990                                 // ((htlc, counterparty_sig), (index, holder_sig))
8991                                 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
8992
8993                                 $({
8994                                         log_trace!(logger, "verifying htlc {}", $htlc_idx);
8995                                         let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
8996
8997                                         let ref htlc = htlcs[$htlc_idx];
8998                                         let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
8999                                                 chan.context.get_counterparty_selected_contest_delay().unwrap(),
9000                                                 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
9001                                         let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
9002                                         let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
9003                                         let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
9004                                         assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
9005
9006                                         let mut preimage: Option<PaymentPreimage> = None;
9007                                         if !htlc.offered {
9008                                                 for i in 0..5 {
9009                                                         let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
9010                                                         if out == htlc.payment_hash {
9011                                                                 preimage = Some(PaymentPreimage([i; 32]));
9012                                                         }
9013                                                 }
9014
9015                                                 assert!(preimage.is_some());
9016                                         }
9017
9018                                         let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
9019                                         let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
9020                                                 channel_derivation_parameters: ChannelDerivationParameters {
9021                                                         value_satoshis: chan.context.channel_value_satoshis,
9022                                                         keys_id: chan.context.channel_keys_id,
9023                                                         transaction_parameters: chan.context.channel_transaction_parameters.clone(),
9024                                                 },
9025                                                 commitment_txid: trusted_tx.txid(),
9026                                                 per_commitment_number: trusted_tx.commitment_number(),
9027                                                 per_commitment_point: trusted_tx.per_commitment_point(),
9028                                                 feerate_per_kw: trusted_tx.feerate_per_kw(),
9029                                                 htlc: htlc.clone(),
9030                                                 preimage: preimage.clone(),
9031                                                 counterparty_sig: *htlc_counterparty_sig,
9032                                         }, &secp_ctx).unwrap();
9033                                         let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
9034                                         assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
9035
9036                                         let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
9037                                         assert_eq!(signature, htlc_holder_sig, "htlc sig");
9038                                         let trusted_tx = holder_commitment_tx.trust();
9039                                         htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
9040                                         log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
9041                                         assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
9042                                 })*
9043                                 assert!(htlc_counterparty_sig_iter.next().is_none());
9044                         } }
9045                 }
9046
9047                 // anchors: simple commitment tx with no HTLCs and single anchor
9048                 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9049                                                  "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9050                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9051
9052                 // simple commitment tx with no HTLCs
9053                 chan.context.value_to_self_msat = 7000000000;
9054
9055                 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9056                                                  "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9057                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9058
9059                 // anchors: simple commitment tx with no HTLCs
9060                 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9061                                                  "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9062                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9063
9064                 chan.context.pending_inbound_htlcs.push({
9065                         let mut out = InboundHTLCOutput{
9066                                 htlc_id: 0,
9067                                 amount_msat: 1000000,
9068                                 cltv_expiry: 500,
9069                                 payment_hash: PaymentHash([0; 32]),
9070                                 state: InboundHTLCState::Committed,
9071                         };
9072                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
9073                         out
9074                 });
9075                 chan.context.pending_inbound_htlcs.push({
9076                         let mut out = InboundHTLCOutput{
9077                                 htlc_id: 1,
9078                                 amount_msat: 2000000,
9079                                 cltv_expiry: 501,
9080                                 payment_hash: PaymentHash([0; 32]),
9081                                 state: InboundHTLCState::Committed,
9082                         };
9083                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9084                         out
9085                 });
9086                 chan.context.pending_outbound_htlcs.push({
9087                         let mut out = OutboundHTLCOutput{
9088                                 htlc_id: 2,
9089                                 amount_msat: 2000000,
9090                                 cltv_expiry: 502,
9091                                 payment_hash: PaymentHash([0; 32]),
9092                                 state: OutboundHTLCState::Committed,
9093                                 source: HTLCSource::dummy(),
9094                                 skimmed_fee_msat: None,
9095                                 blinding_point: None,
9096                         };
9097                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
9098                         out
9099                 });
9100                 chan.context.pending_outbound_htlcs.push({
9101                         let mut out = OutboundHTLCOutput{
9102                                 htlc_id: 3,
9103                                 amount_msat: 3000000,
9104                                 cltv_expiry: 503,
9105                                 payment_hash: PaymentHash([0; 32]),
9106                                 state: OutboundHTLCState::Committed,
9107                                 source: HTLCSource::dummy(),
9108                                 skimmed_fee_msat: None,
9109                                 blinding_point: None,
9110                         };
9111                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
9112                         out
9113                 });
9114                 chan.context.pending_inbound_htlcs.push({
9115                         let mut out = InboundHTLCOutput{
9116                                 htlc_id: 4,
9117                                 amount_msat: 4000000,
9118                                 cltv_expiry: 504,
9119                                 payment_hash: PaymentHash([0; 32]),
9120                                 state: InboundHTLCState::Committed,
9121                         };
9122                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
9123                         out
9124                 });
9125
9126                 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9127                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9128                 chan.context.feerate_per_kw = 0;
9129
9130                 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9131                                  "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9132                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9133
9134                                   { 0,
9135                                   "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9136                                   "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9137                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9138
9139                                   { 1,
9140                                   "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9141                                   "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9142                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9143
9144                                   { 2,
9145                                   "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9146                                   "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9147                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9148
9149                                   { 3,
9150                                   "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9151                                   "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9152                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9153
9154                                   { 4,
9155                                   "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9156                                   "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9157                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9158                 } );
9159
9160                 // commitment tx with seven outputs untrimmed (maximum feerate)
9161                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9162                 chan.context.feerate_per_kw = 647;
9163
9164                 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
9165                                  "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
9166                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9167
9168                                   { 0,
9169                                   "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
9170                                   "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
9171                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9172
9173                                   { 1,
9174                                   "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
9175                                   "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
9176                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9177
9178                                   { 2,
9179                                   "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
9180                                   "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
9181                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9182
9183                                   { 3,
9184                                   "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
9185                                   "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
9186                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9187
9188                                   { 4,
9189                                   "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
9190                                   "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
9191                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9192                 } );
9193
9194                 // commitment tx with six outputs untrimmed (minimum feerate)
9195                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9196                 chan.context.feerate_per_kw = 648;
9197
9198                 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9199                                  "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9200                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9201
9202                                   { 0,
9203                                   "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9204                                   "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9205                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9206
9207                                   { 1,
9208                                   "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9209                                   "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9210                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9211
9212                                   { 2,
9213                                   "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9214                                   "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9215                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9216
9217                                   { 3,
9218                                   "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9219                                   "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9220                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9221                 } );
9222
9223                 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
9224                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9225                 chan.context.feerate_per_kw = 645;
9226                 chan.context.holder_dust_limit_satoshis = 1001;
9227
9228                 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9229                                  "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9230                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9231
9232                                   { 0,
9233                                   "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9234                                   "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9235                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9236
9237                                   { 1,
9238                                   "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9239                                   "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9240                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9241
9242                                   { 2,
9243                                   "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9244                                   "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9245                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9246
9247                                   { 3,
9248                                   "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9249                                   "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9250                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9251                 } );
9252
9253                 // commitment tx with six outputs untrimmed (maximum feerate)
9254                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9255                 chan.context.feerate_per_kw = 2069;
9256                 chan.context.holder_dust_limit_satoshis = 546;
9257
9258                 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9259                                  "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9260                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9261
9262                                   { 0,
9263                                   "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9264                                   "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9265                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9266
9267                                   { 1,
9268                                   "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9269                                   "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9270                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9271
9272                                   { 2,
9273                                   "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9274                                   "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9275                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9276
9277                                   { 3,
9278                                   "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9279                                   "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9280                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9281                 } );
9282
9283                 // commitment tx with five outputs untrimmed (minimum feerate)
9284                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9285                 chan.context.feerate_per_kw = 2070;
9286
9287                 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9288                                  "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9289                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9290
9291                                   { 0,
9292                                   "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9293                                   "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9294                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9295
9296                                   { 1,
9297                                   "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9298                                   "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9299                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9300
9301                                   { 2,
9302                                   "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9303                                   "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9304                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9305                 } );
9306
9307                 // commitment tx with five outputs untrimmed (maximum feerate)
9308                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9309                 chan.context.feerate_per_kw = 2194;
9310
9311                 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9312                                  "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9313                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9314
9315                                   { 0,
9316                                   "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9317                                   "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9318                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9319
9320                                   { 1,
9321                                   "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9322                                   "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9323                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9324
9325                                   { 2,
9326                                   "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9327                                   "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9328                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9329                 } );
9330
9331                 // commitment tx with four outputs untrimmed (minimum feerate)
9332                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9333                 chan.context.feerate_per_kw = 2195;
9334
9335                 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9336                                  "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9337                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9338
9339                                   { 0,
9340                                   "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9341                                   "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9342                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9343
9344                                   { 1,
9345                                   "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9346                                   "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9347                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9348                 } );
9349
9350                 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
9351                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9352                 chan.context.feerate_per_kw = 2185;
9353                 chan.context.holder_dust_limit_satoshis = 2001;
9354                 let cached_channel_type = chan.context.channel_type;
9355                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9356
9357                 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9358                                  "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9359                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9360
9361                                   { 0,
9362                                   "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9363                                   "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9364                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9365
9366                                   { 1,
9367                                   "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
9368                                   "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
9369                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9370                 } );
9371
9372                 // commitment tx with four outputs untrimmed (maximum feerate)
9373                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9374                 chan.context.feerate_per_kw = 3702;
9375                 chan.context.holder_dust_limit_satoshis = 546;
9376                 chan.context.channel_type = cached_channel_type.clone();
9377
9378                 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
9379                                  "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
9380                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9381
9382                                   { 0,
9383                                   "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
9384                                   "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
9385                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9386
9387                                   { 1,
9388                                   "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
9389                                   "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
9390                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9391                 } );
9392
9393                 // commitment tx with three outputs untrimmed (minimum feerate)
9394                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9395                 chan.context.feerate_per_kw = 3703;
9396
9397                 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
9398                                  "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
9399                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9400
9401                                   { 0,
9402                                   "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
9403                                   "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
9404                                   "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9405                 } );
9406
9407                 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
9408                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9409                 chan.context.feerate_per_kw = 3687;
9410                 chan.context.holder_dust_limit_satoshis = 3001;
9411                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9412
9413                 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
9414                                  "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
9415                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9416
9417                                   { 0,
9418                                   "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
9419                                   "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
9420                                   "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9421                 } );
9422
9423                 // commitment tx with three outputs untrimmed (maximum feerate)
9424                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9425                 chan.context.feerate_per_kw = 4914;
9426                 chan.context.holder_dust_limit_satoshis = 546;
9427                 chan.context.channel_type = cached_channel_type.clone();
9428
9429                 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
9430                                  "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
9431                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9432
9433                                   { 0,
9434                                   "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
9435                                   "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
9436                                   "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9437                 } );
9438
9439                 // commitment tx with two outputs untrimmed (minimum feerate)
9440                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9441                 chan.context.feerate_per_kw = 4915;
9442                 chan.context.holder_dust_limit_satoshis = 546;
9443
9444                 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
9445                                  "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
9446                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9447
9448                 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
9449                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9450                 chan.context.feerate_per_kw = 4894;
9451                 chan.context.holder_dust_limit_satoshis = 4001;
9452                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9453
9454                 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
9455                                  "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
9456                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9457
9458                 // commitment tx with two outputs untrimmed (maximum feerate)
9459                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9460                 chan.context.feerate_per_kw = 9651180;
9461                 chan.context.holder_dust_limit_satoshis = 546;
9462                 chan.context.channel_type = cached_channel_type.clone();
9463
9464                 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
9465                                  "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
9466                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9467
9468                 // commitment tx with one output untrimmed (minimum feerate)
9469                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9470                 chan.context.feerate_per_kw = 9651181;
9471
9472                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9473                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9474                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9475
9476                 // anchors: commitment tx with one output untrimmed (minimum dust limit)
9477                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9478                 chan.context.feerate_per_kw = 6216010;
9479                 chan.context.holder_dust_limit_satoshis = 4001;
9480                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9481
9482                 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
9483                                  "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
9484                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9485
9486                 // commitment tx with fee greater than funder amount
9487                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9488                 chan.context.feerate_per_kw = 9651936;
9489                 chan.context.holder_dust_limit_satoshis = 546;
9490                 chan.context.channel_type = cached_channel_type;
9491
9492                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
9493                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
9494                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9495
9496                 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
9497                 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
9498                 chan.context.feerate_per_kw = 253;
9499                 chan.context.pending_inbound_htlcs.clear();
9500                 chan.context.pending_inbound_htlcs.push({
9501                         let mut out = InboundHTLCOutput{
9502                                 htlc_id: 1,
9503                                 amount_msat: 2000000,
9504                                 cltv_expiry: 501,
9505                                 payment_hash: PaymentHash([0; 32]),
9506                                 state: InboundHTLCState::Committed,
9507                         };
9508                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9509                         out
9510                 });
9511                 chan.context.pending_outbound_htlcs.clear();
9512                 chan.context.pending_outbound_htlcs.push({
9513                         let mut out = OutboundHTLCOutput{
9514                                 htlc_id: 6,
9515                                 amount_msat: 5000001,
9516                                 cltv_expiry: 506,
9517                                 payment_hash: PaymentHash([0; 32]),
9518                                 state: OutboundHTLCState::Committed,
9519                                 source: HTLCSource::dummy(),
9520                                 skimmed_fee_msat: None,
9521                                 blinding_point: None,
9522                         };
9523                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9524                         out
9525                 });
9526                 chan.context.pending_outbound_htlcs.push({
9527                         let mut out = OutboundHTLCOutput{
9528                                 htlc_id: 5,
9529                                 amount_msat: 5000000,
9530                                 cltv_expiry: 505,
9531                                 payment_hash: PaymentHash([0; 32]),
9532                                 state: OutboundHTLCState::Committed,
9533                                 source: HTLCSource::dummy(),
9534                                 skimmed_fee_msat: None,
9535                                 blinding_point: None,
9536                         };
9537                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
9538                         out
9539                 });
9540
9541                 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
9542                                  "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
9543                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9544
9545                                   { 0,
9546                                   "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
9547                                   "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
9548                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9549                                   { 1,
9550                                   "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
9551                                   "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
9552                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
9553                                   { 2,
9554                                   "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
9555                                   "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
9556                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
9557                 } );
9558
9559                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9560                 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
9561                                  "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
9562                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9563
9564                                   { 0,
9565                                   "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
9566                                   "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
9567                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9568                                   { 1,
9569                                   "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
9570                                   "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
9571                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
9572                                   { 2,
9573                                   "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
9574                                   "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
9575                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
9576                 } );
9577         }
9578
9579         #[test]
9580         fn test_per_commitment_secret_gen() {
9581                 // Test vectors from BOLT 3 Appendix D:
9582
9583                 let mut seed = [0; 32];
9584                 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
9585                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9586                            <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
9587
9588                 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
9589                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
9590                            <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
9591
9592                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
9593                            <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
9594
9595                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
9596                            <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
9597
9598                 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
9599                 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
9600                            <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
9601         }
9602
9603         #[test]
9604         fn test_key_derivation() {
9605                 // Test vectors from BOLT 3 Appendix E:
9606                 let secp_ctx = Secp256k1::new();
9607
9608                 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
9609                 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9610
9611                 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
9612                 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
9613
9614                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9615                 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
9616
9617                 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
9618                                 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
9619
9620                 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
9621                                 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
9622
9623                 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
9624                                 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
9625         }
9626
9627         #[test]
9628         fn test_zero_conf_channel_type_support() {
9629                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9630                 let secp_ctx = Secp256k1::new();
9631                 let seed = [42; 32];
9632                 let network = Network::Testnet;
9633                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9634                 let logger = test_utils::TestLogger::new();
9635
9636                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9637                 let config = UserConfig::default();
9638                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9639                         node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9640
9641                 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9642                 channel_type_features.set_zero_conf_required();
9643
9644                 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9645                 open_channel_msg.channel_type = Some(channel_type_features);
9646                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9647                 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
9648                         node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9649                         &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
9650                 assert!(res.is_ok());
9651         }
9652
9653         #[test]
9654         fn test_supports_anchors_zero_htlc_tx_fee() {
9655                 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
9656                 // resulting `channel_type`.
9657                 let secp_ctx = Secp256k1::new();
9658                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9659                 let network = Network::Testnet;
9660                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9661                 let logger = test_utils::TestLogger::new();
9662
9663                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9664                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9665
9666                 let mut config = UserConfig::default();
9667                 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
9668
9669                 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
9670                 // need to signal it.
9671                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9672                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9673                         &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
9674                         &config, 0, 42, None
9675                 ).unwrap();
9676                 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
9677
9678                 let mut expected_channel_type = ChannelTypeFeatures::empty();
9679                 expected_channel_type.set_static_remote_key_required();
9680                 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
9681
9682                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9683                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9684                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9685                         None
9686                 ).unwrap();
9687
9688                 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9689                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9690                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9691                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9692                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9693                 ).unwrap();
9694
9695                 assert_eq!(channel_a.context.channel_type, expected_channel_type);
9696                 assert_eq!(channel_b.context.channel_type, expected_channel_type);
9697         }
9698
9699         #[test]
9700         fn test_rejects_implicit_simple_anchors() {
9701                 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
9702                 // each side's `InitFeatures`, it is rejected.
9703                 let secp_ctx = Secp256k1::new();
9704                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9705                 let network = Network::Testnet;
9706                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9707                 let logger = test_utils::TestLogger::new();
9708
9709                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9710                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9711
9712                 let config = UserConfig::default();
9713
9714                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9715                 let static_remote_key_required: u64 = 1 << 12;
9716                 let simple_anchors_required: u64 = 1 << 20;
9717                 let raw_init_features = static_remote_key_required | simple_anchors_required;
9718                 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
9719
9720                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9721                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9722                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9723                         None
9724                 ).unwrap();
9725
9726                 // Set `channel_type` to `None` to force the implicit feature negotiation.
9727                 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9728                 open_channel_msg.channel_type = None;
9729
9730                 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
9731                 // `static_remote_key`, it will fail the channel.
9732                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9733                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9734                         &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
9735                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9736                 );
9737                 assert!(channel_b.is_err());
9738         }
9739
9740         #[test]
9741         fn test_rejects_simple_anchors_channel_type() {
9742                 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
9743                 // it is rejected.
9744                 let secp_ctx = Secp256k1::new();
9745                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9746                 let network = Network::Testnet;
9747                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
9748                 let logger = test_utils::TestLogger::new();
9749
9750                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
9751                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
9752
9753                 let config = UserConfig::default();
9754
9755                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
9756                 let static_remote_key_required: u64 = 1 << 12;
9757                 let simple_anchors_required: u64 = 1 << 20;
9758                 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
9759                 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9760                 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
9761                 assert!(!simple_anchors_init.requires_unknown_bits());
9762                 assert!(!simple_anchors_channel_type.requires_unknown_bits());
9763
9764                 // First, we'll try to open a channel between A and B where A requests a channel type for
9765                 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
9766                 // B as it's not supported by LDK.
9767                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9768                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
9769                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
9770                         None
9771                 ).unwrap();
9772
9773                 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9774                 open_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9775
9776                 let res = InboundV1Channel::<&TestKeysInterface>::new(
9777                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9778                         &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
9779                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9780                 );
9781                 assert!(res.is_err());
9782
9783                 // Then, we'll try to open another channel where A requests a channel type for
9784                 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
9785                 // original `option_anchors` feature, which should be rejected by A as it's not supported by
9786                 // LDK.
9787                 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
9788                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
9789                         10000000, 100000, 42, &config, 0, 42, None
9790                 ).unwrap();
9791
9792                 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
9793
9794                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
9795                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
9796                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
9797                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
9798                 ).unwrap();
9799
9800                 let mut accept_channel_msg = channel_b.get_accept_channel_message();
9801                 accept_channel_msg.channel_type = Some(simple_anchors_channel_type.clone());
9802
9803                 let res = channel_a.accept_channel(
9804                         &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
9805                 );
9806                 assert!(res.is_err());
9807         }
9808
9809         #[test]
9810         fn test_waiting_for_batch() {
9811                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9812                 let logger = test_utils::TestLogger::new();
9813                 let secp_ctx = Secp256k1::new();
9814                 let seed = [42; 32];
9815                 let network = Network::Testnet;
9816                 let best_block = BestBlock::from_network(network);
9817                 let chain_hash = ChainHash::using_genesis_block(network);
9818                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9819
9820                 let mut config = UserConfig::default();
9821                 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
9822                 // channel in a batch before all channels are ready.
9823                 config.channel_handshake_limits.trust_own_funding_0conf = true;
9824
9825                 // Create a channel from node a to node b that will be part of batch funding.
9826                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9827                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9828                         &feeest,
9829                         &&keys_provider,
9830                         &&keys_provider,
9831                         node_b_node_id,
9832                         &channelmanager::provided_init_features(&config),
9833                         10000000,
9834                         100000,
9835                         42,
9836                         &config,
9837                         0,
9838                         42,
9839                         None
9840                 ).unwrap();
9841
9842                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9843                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9844                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
9845                         &feeest,
9846                         &&keys_provider,
9847                         &&keys_provider,
9848                         node_b_node_id,
9849                         &channelmanager::provided_channel_type_features(&config),
9850                         &channelmanager::provided_init_features(&config),
9851                         &open_channel_msg,
9852                         7,
9853                         &config,
9854                         0,
9855                         &&logger,
9856                         true,  // Allow node b to send a 0conf channel_ready.
9857                 ).unwrap();
9858
9859                 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9860                 node_a_chan.accept_channel(
9861                         &accept_channel_msg,
9862                         &config.channel_handshake_limits,
9863                         &channelmanager::provided_init_features(&config),
9864                 ).unwrap();
9865
9866                 // Fund the channel with a batch funding transaction.
9867                 let output_script = node_a_chan.context.get_funding_redeemscript();
9868                 let tx = Transaction {
9869                         version: 1,
9870                         lock_time: LockTime::ZERO,
9871                         input: Vec::new(),
9872                         output: vec![
9873                                 TxOut {
9874                                         value: 10000000, script_pubkey: output_script.clone(),
9875                                 },
9876                                 TxOut {
9877                                         value: 10000000, script_pubkey: Builder::new().into_script(),
9878                                 },
9879                         ]};
9880                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9881                 let funding_created_msg = node_a_chan.get_funding_created(
9882                         tx.clone(), funding_outpoint, true, &&logger,
9883                 ).map_err(|_| ()).unwrap();
9884                 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
9885                         &funding_created_msg.unwrap(),
9886                         best_block,
9887                         &&keys_provider,
9888                         &&logger,
9889                 ).map_err(|_| ()).unwrap();
9890                 let node_b_updates = node_b_chan.monitor_updating_restored(
9891                         &&logger,
9892                         &&keys_provider,
9893                         chain_hash,
9894                         &config,
9895                         0,
9896                 );
9897
9898                 // Receive funding_signed, but the channel will be configured to hold sending channel_ready and
9899                 // broadcasting the funding transaction until the batch is ready.
9900                 let res = node_a_chan.funding_signed(
9901                         &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
9902                 );
9903                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9904                 let node_a_updates = node_a_chan.monitor_updating_restored(
9905                         &&logger,
9906                         &&keys_provider,
9907                         chain_hash,
9908                         &config,
9909                         0,
9910                 );
9911                 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
9912                 // as the funding transaction depends on all channels in the batch becoming ready.
9913                 assert!(node_a_updates.channel_ready.is_none());
9914                 assert!(node_a_updates.funding_broadcastable.is_none());
9915                 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
9916
9917                 // It is possible to receive a 0conf channel_ready from the remote node.
9918                 node_a_chan.channel_ready(
9919                         &node_b_updates.channel_ready.unwrap(),
9920                         &&keys_provider,
9921                         chain_hash,
9922                         &config,
9923                         &best_block,
9924                         &&logger,
9925                 ).unwrap();
9926                 assert_eq!(
9927                         node_a_chan.context.channel_state,
9928                         ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
9929                 );
9930
9931                 // Clear the ChannelState::WaitingForBatch only when called by ChannelManager.
9932                 node_a_chan.set_batch_ready();
9933                 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
9934                 assert!(node_a_chan.check_get_channel_ready(0).is_some());
9935         }
9936 }