// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::types::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::EcdsaChannelSigner;
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::io;
use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};

#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}

pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}
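
// A minimal sketch (not part of upstream rust-lightning): the fields above
// obey simple invariants, namely that the per-HTLC limit can never exceed the
// total outbound capacity and the minimum can never exceed the limit. The
// values used here are hypothetical.
#[cfg(test)]
mod available_balances_sanity_sketch {
	use super::AvailableBalances;

	#[test]
	fn next_htlc_bounds_lie_within_capacity() {
		let balances = AvailableBalances {
			balance_msat: 5_000_000,
			inbound_capacity_msat: 2_000_000,
			outbound_capacity_msat: 3_000_000,
			next_outbound_htlc_limit_msat: 2_500_000,
			next_outbound_htlc_minimum_msat: 1_000,
		};
		assert!(balances.next_outbound_htlc_limit_msat <= balances.outbound_capacity_msat);
		assert!(balances.next_outbound_htlc_minimum_msat <= balances.next_outbound_htlc_limit_msat);
	}
}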

#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}

/// Represents the resolution status of an inbound HTLC.
#[derive(Clone)]
enum InboundHTLCResolution {
	/// Resolved implies the action we must take with the inbound HTLC has already been determined,
	/// i.e., we already know whether it must be failed back or forwarded.
	//
	// TODO: Once this variant is removed, we should also clean up
	// [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable.
	Resolved {
		pending_htlc_status: PendingHTLCStatus,
	},
	/// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed
	/// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both
	/// nodes for the state update in which it was proposed.
	Pending {
		update_add_htlc: msgs::UpdateAddHTLC,
	},
}

impl_writeable_tlv_based_enum!(InboundHTLCResolution,
	(0, Resolved) => {
		(0, pending_htlc_status, required),
	},
	(2, Pending) => {
		(0, update_add_htlc, required),
	};
);

enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(InboundHTLCResolution),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point;
	/// these are provided one at a time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack               <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It cannot be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}

/// Exposes the state of pending inbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum InboundHTLCStateDetails {
	/// We have added this HTLC in our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// before this HTLC is included on the remote commitment transaction.
	AwaitingRemoteRevokeToAdd,
	/// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides
	/// and is included in both commitment transactions.
	///
	/// This HTLC is now safe to either forward or be claimed as a payment by us. The HTLC will
	/// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this
	/// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart
	/// payment, it will only be claimed together with other required parts.
	Committed,
	/// We have received the preimage for this HTLC and it is being removed by fulfilling it with
	/// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting
	/// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote
	/// commitment transaction after update_fulfill_htlc.
	AwaitingRemoteRevokeToRemoveFulfill,
	/// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc.
	/// This HTLC is still on both commitment transactions, but we are awaiting the appropriate
	/// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment
	/// transaction.
	AwaitingRemoteRevokeToRemoveFail,
}

impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
	fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> {
		match state {
			InboundHTLCState::RemoteAnnounced(_) => None,
			InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::Committed =>
				Some(InboundHTLCStateDetails::Committed),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill),
		}
	}
}
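
// A minimal sketch (not part of upstream rust-lightning) exercising the
// mapping above: a fully-committed inbound HTLC is exposed to the user as
// `Committed`.
#[cfg(test)]
mod inbound_htlc_state_mapping_sketch {
	use super::{InboundHTLCState, InboundHTLCStateDetails};

	#[test]
	fn committed_maps_to_committed() {
		let details: Option<InboundHTLCStateDetails> = (&InboundHTLCState::Committed).into();
		assert_eq!(details, Some(InboundHTLCStateDetails::Committed));
	}
}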

impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveFulfill) => {},
	(6, AwaitingRemoteRevokeToRemoveFail) => {};
);

struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}

/// Exposes details around pending inbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct InboundHTLCDetails {
	/// The HTLC ID.
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	pub htlc_id: u64,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`InboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<InboundHTLCStateDetails>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcasted as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
}

impl_writeable_tlv_based!(InboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, is_dust, required),
});

#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}

/// Exposes the state of pending outbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum OutboundHTLCStateDetails {
	/// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added
	/// on the remote's commitment transaction after update_add_htlc.
	AwaitingRemoteRevokeToAdd,
	/// The HTLC has been added to the remote's commitment transaction by sending commitment_signed
	/// and receiving revoke_and_ack in return.
	///
	/// The HTLC will remain in this state until the remote node resolves the HTLC, or until we
	/// unilaterally close the channel due to a timeout with an uncooperative remote node.
	Committed,
	/// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveSuccess,
	/// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveFailure,
}

impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
	fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails {
		match state {
			OutboundHTLCState::LocalAnnounced(_) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd,
			OutboundHTLCState::Committed =>
				OutboundHTLCStateDetails::Committed,
			// RemoteRemoved states are ignored as the state is transient and the remote has not committed to
			// the state yet.
			OutboundHTLCState::RemoteRemoved(_) =>
				OutboundHTLCStateDetails::Committed,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
		}
	}
}
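
// A minimal sketch (not part of upstream rust-lightning): per the comment in
// the mapping above, a `RemoteRemoved` HTLC is transient and is still
// reported as `Committed` until the removal is irrevocably committed.
#[cfg(test)]
mod outbound_htlc_state_mapping_sketch {
	use super::{OutboundHTLCOutcome, OutboundHTLCState, OutboundHTLCStateDetails};

	#[test]
	fn remote_removed_still_reports_committed() {
		let state = OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(None));
		assert_eq!(OutboundHTLCStateDetails::from(&state), OutboundHTLCStateDetails::Committed);
	}
}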

impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveSuccess) => {},
	(6, AwaitingRemoteRevokeToRemoveFailure) => {};
);

#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}

impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}

#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}

/// Exposes details around pending outbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct OutboundHTLCDetails {
	/// The HTLC ID.
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	///
	/// Not present when we are awaiting a remote revocation and the HTLC is not added yet.
	pub htlc_id: Option<u64>,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`OutboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<OutboundHTLCStateDetails>,
	/// The extra fee being skimmed off the top of this HTLC.
	pub skimmed_fee_msat: Option<u64>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcasted as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
}

impl_writeable_tlv_based!(OutboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, skimmed_fee_msat, required),
	(10, is_dust, required),
});
/// See the `AWAITING_REMOTE_REVOKE` flag in [`ChannelState::ChannelReady`] for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}

macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			#[allow(unused)]
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			#[allow(unused)]
			fn new() -> Self { Self(0) }

			#[allow(unused)]
			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			#[allow(unused)]
			fn is_empty(&self) -> bool { self.0 == 0 }
			#[allow(unused)]
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
			#[allow(unused)]
			fn set(&mut self, flag: Self) { *self |= flag }
			#[allow(unused)]
			fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
		}

		$(
			define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);
		)*

		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
		impl $flag_type {
			#[allow(unused)]
			fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
			#[allow(unused)]
			fn $set(&mut self) { self.set($flag_type::new() | $flag) }
			#[allow(unused)]
			fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
		}
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);

		define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
			is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
		define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
			is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
		define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
			is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
		define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
			is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);

		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}

/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}

define_state_flags!(
	"Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
	FundedStateFlags, [
		("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
			until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
			is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
		("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
			somewhere and we should pause sending any outbound messages until they've managed to \
			complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
			is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
		("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
			any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
			message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
			is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
		("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
			the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
			is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
	]
);
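
// A minimal sketch (not part of upstream rust-lightning): the getters,
// setters, and clearers generated by `define_state_flags!` behave like
// operations on a plain bit set.
#[cfg(test)]
mod funded_state_flags_sketch {
	use super::FundedStateFlags;

	#[test]
	fn set_then_clear_peer_disconnected() {
		let mut flags = FundedStateFlags::new();
		assert!(!flags.is_peer_disconnected());
		flags.set_peer_disconnected();
		assert!(flags.is_peer_disconnected());
		flags.clear_peer_disconnected();
		assert!(flags.is_empty());
	}
}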

define_state_flags!(
	"Flags that only apply to [`ChannelState::NegotiatingFunding`].",
	NegotiatingFundingFlags, [
		("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
			OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
		("Indicates we have received their `open_channel`/`accept_channel` message.",
			THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
	]
);

define_state_flags!(
	"Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
	FUNDED_STATE, AwaitingChannelReadyFlags, [
		("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
			is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
		("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
			is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
		("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
			is being held until all channels in the batch have received `funding_signed` and have \
			their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
			is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
	]
);

define_state_flags!(
	"Flags that only apply to [`ChannelState::ChannelReady`].",
	FUNDED_STATE, ChannelReadyFlags, [
		("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
			`revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
			messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
			implicit ACK, so instead we have to hold them away temporarily to be sent later.",
			AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
			is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
	]
);

// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
// into account when introducing new states and update `test_channel_state_order` accordingly.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
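
// A minimal sketch (not part of upstream rust-lightning; upstream has a
// fuller `test_channel_state_order`): derived `PartialOrd` follows variant
// order, so earlier lifecycle states must compare below later ones.
#[cfg(test)]
mod channel_state_order_sketch {
	use super::{AwaitingChannelReadyFlags, ChannelReadyFlags, ChannelState, NegotiatingFundingFlags};

	#[test]
	fn lifecycle_states_are_ordered() {
		assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new())
			< ChannelState::FundingNegotiated);
		assert!(ChannelState::FundingNegotiated
			< ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
		assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new())
			< ChannelState::ChannelReady(ChannelReadyFlags::new()));
		assert!(ChannelState::ChannelReady(ChannelReadyFlags::new())
			< ChannelState::ShutdownComplete);
	}
}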

macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
		#[allow(unused)]
		fn $get(&self) -> bool {
			match self {
				$(
					ChannelState::$state(flags) => flags.$get(),
				)*
				_ => false,
			}
		}
		#[allow(unused)]
		fn $set(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => flags.$set(),
				)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		#[allow(unused)]
		fn $clear(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => { let _ = flags.$clear(); },
				)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state: ident) => {
		impl_state_flag!($get, $set, $clear, [$state]);
	};
}

impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn can_generate_new_commitment(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				!flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
					!flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
					!flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "Can only generate new commitment within ChannelReady");
				false
			},
		}
	}

	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
}
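
// A minimal sketch (not part of upstream rust-lightning): converting a state
// to its packed u32 form via `to_u32` and parsing it back via `from_u32`
// must be lossless, including any per-state flags.
#[cfg(test)]
mod channel_state_u32_round_trip_sketch {
	use super::{ChannelReadyFlags, ChannelState};

	#[test]
	fn ready_state_round_trips() {
		let state = ChannelState::ChannelReady(ChannelReadyFlags::AWAITING_REMOTE_REVOKE);
		assert_eq!(ChannelState::from_u32(state.to_u32()), Ok(state));
	}
}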

pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
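
// A minimal sketch (not part of upstream rust-lightning): total commitment
// transaction weight is the channel-type-dependent base weight plus 172
// weight units per non-dust HTLC output.
#[cfg(test)]
mod commitment_weight_sketch {
	use super::{commitment_tx_base_weight, COMMITMENT_TX_WEIGHT_PER_HTLC};
	use crate::ln::features::ChannelTypeFeatures;

	#[test]
	fn weight_grows_linearly_with_htlcs() {
		let features = ChannelTypeFeatures::only_static_remote_key();
		let base = commitment_tx_base_weight(&features);
		assert_eq!(base, 724);
		assert_eq!(base + 2 * COMMITMENT_TX_WEIGHT_PER_HTLC, 724 + 2 * 172);
	}
}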

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

/// The percentage of the channel value that `holder_max_htlc_value_in_flight_msat` was set to
/// before it was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;
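
// A minimal sketch (not part of upstream rust-lightning): spell out the
// arithmetic behind the two constants above.
#[cfg(test)]
mod funding_constants_sketch {
	use super::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS};

	#[test]
	fn constants_match_their_docs() {
		assert_eq!(MAX_FUNDING_SATOSHIS_NO_WUMBO, 16_777_215); // 2^24 - 1
		assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 2_100_000_000_000_000); // 21M BTC * 100M sat
	}
}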

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;
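
// A minimal sketch (not part of upstream rust-lightning): the dust-limit and
// reserve constants above must be mutually consistent; in particular, the
// reserve floor sits above any dust limit we will accept, per the comment on
// `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
#[cfg(test)]
mod dust_and_reserve_bounds_sketch {
	use super::{MAX_CHAN_DUST_LIMIT_SATOSHIS, MIN_CHAN_DUST_LIMIT_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};

	#[test]
	fn reserve_floor_exceeds_dust_limits() {
		assert!(MIN_CHAN_DUST_LIMIT_SATOSHIS <= MAX_CHAN_DUST_LIMIT_SATOSHIS);
		assert!(MIN_THEIR_CHAN_RESERVE_SATOSHIS > MAX_CHAN_DUST_LIMIT_SATOSHIS);
	}
}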

/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}
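
// A minimal sketch (not part of upstream rust-lightning): `Display` emits
// only the message, while `Debug` prefixes the severity, matching the impls
// above.
#[cfg(test)]
mod channel_error_format_sketch {
	use super::ChannelError;

	#[test]
	fn display_and_debug_formats() {
		let err = ChannelError::Warn("timeout".to_owned());
		assert_eq!(format!("{}", err), "timeout");
		assert_eq!(format!("{:?}", err), "Warn : timeout");
	}
}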
921
922 pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
923         pub logger: &'a L,
924         pub peer_id: Option<PublicKey>,
925         pub channel_id: Option<ChannelId>,
926         pub payment_hash: Option<PaymentHash>,
927 }
928
929 impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
930         fn log(&self, mut record: Record) {
931                 record.peer_id = self.peer_id;
932                 record.channel_id = self.channel_id;
933                 record.payment_hash = self.payment_hash;
934                 self.logger.log(record)
935         }
936 }
937
938 impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
939 where L::Target: Logger {
940         pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>, payment_hash: Option<PaymentHash>) -> Self
941         where S::Target: SignerProvider
942         {
943                 WithChannelContext {
944                         logger,
945                         peer_id: Some(context.counterparty_node_id),
946                         channel_id: Some(context.channel_id),
947                         payment_hash
948                 }
949         }
950 }
951
952 macro_rules! secp_check {
953         ($res: expr, $err: expr) => {
954                 match $res {
955                         Ok(thing) => thing,
956                         Err(_) => return Err(ChannelError::Close($err)),
957                 }
958         };
959 }
960
961 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
962 /// our counterparty or not. However, we don't want to announce updates right away to avoid
963 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
964 /// our channel_update message and track the current state here.
965 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
966 #[derive(Clone, Copy, PartialEq)]
967 pub(super) enum ChannelUpdateStatus {
968         /// We've announced the channel as enabled and are connected to our peer.
969         Enabled,
970         /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
971         DisabledStaged(u8),
972         /// Our channel is live again, but we haven't announced the channel as enabled yet.
973         EnabledStaged(u8),
974         /// We've announced the channel as disabled.
975         Disabled,
976 }
977
978 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
979 #[derive(PartialEq)]
980 pub enum AnnouncementSigsState {
981         /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
982         /// we sent the last `AnnouncementSignatures`.
983         NotSent,
984         /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
985         /// This state never appears on disk - instead we write `NotSent`.
986         MessageSent,
987         /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
988         /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
989         /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
990         /// they send back a `RevokeAndACK`.
991         /// This state never appears on disk - instead we write `NotSent`.
992         Committed,
993         /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
994         /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
995         PeerReceived,
996 }
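// As a sketch, the intended progression (a peer disconnect resets `MessageSent` or
// `Committed` back to `NotSent`, and both serialize to disk as `NotSent`):
//
//     NotSent --send announcement_signatures--> MessageSent
//     MessageSent --send commitment_signed--> Committed
//     Committed --receive revoke_and_ack--> PeerReceived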
997
998 /// An enum indicating whether the local or remote side offered a given HTLC.
999 enum HTLCInitiator {
1000         LocalOffered,
1001         RemoteOffered,
1002 }
1003
1004 /// Current counts of various HTLCs, useful for calculating the currently available balances exactly.
1005 struct HTLCStats {
1006         pending_inbound_htlcs: usize,
1007         pending_outbound_htlcs: usize,
1008         pending_inbound_htlcs_value_msat: u64,
1009         pending_outbound_htlcs_value_msat: u64,
1010         on_counterparty_tx_dust_exposure_msat: u64,
1011         on_holder_tx_dust_exposure_msat: u64,
1012         outbound_holding_cell_msat: u64,
1013         on_holder_tx_outbound_holding_cell_htlcs_count: u32, // dust HTLCs *not* included
1014 }
1015
1016 /// A struct gathering stats on a commitment transaction, either local or remote.
1017 struct CommitmentStats<'a> {
1018         tx: CommitmentTransaction, // the transaction info
1019         feerate_per_kw: u32, // the feerate included to build the transaction
1020         total_fee_sat: u64, // the total fee included in the transaction
1021         num_nondust_htlcs: usize,  // the number of HTLC outputs (dust HTLCs *not* included)
1022         htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
1023         local_balance_msat: u64, // local balance before fees *not* considering dust limits
1024         remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
1025         outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
1026         inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
1027 }
1028
1029 /// Used when calculating whether we or the remote can afford an additional HTLC.
1030 struct HTLCCandidate {
1031         amount_msat: u64,
1032         origin: HTLCInitiator,
1033 }
1034
1035 impl HTLCCandidate {
1036         fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
1037                 Self {
1038                         amount_msat,
1039                         origin,
1040                 }
1041         }
1042 }
1043
1044 /// A return value enum for `get_update_fulfill_htlc`. See the [`UpdateFulfillCommitFetch`]
1045 /// variants for a description.
1046 enum UpdateFulfillFetch {
1047         NewClaim {
1048                 monitor_update: ChannelMonitorUpdate,
1049                 htlc_value_msat: u64,
1050                 msg: Option<msgs::UpdateFulfillHTLC>,
1051         },
1052         DuplicateClaim {},
1053 }
1054
1055 /// The return type of `get_update_fulfill_htlc_and_commit`.
1056 pub enum UpdateFulfillCommitFetch {
1057         /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
1058         /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
1059         /// previously placed in the holding cell (and has since been removed).
1060         NewClaim {
1061                 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
1062                 monitor_update: ChannelMonitorUpdate,
1063                 /// The value of the HTLC which was claimed, in msat.
1064                 htlc_value_msat: u64,
1065         },
1066         /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
1067         /// or has been forgotten (presumably previously claimed).
1068         DuplicateClaim {},
1069 }
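// A consumption sketch (hypothetical caller-side names; the real handling lives in
// `ChannelManager`'s claim path):
//
//     match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage, &logger) {
//         UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } => {
//             // Persist the preimage via `monitor_update` before doing anything else,
//             // then account for the claimed `htlc_value_msat`.
//         },
//         UpdateFulfillCommitFetch::DuplicateClaim {} => {
//             // Already claimed or sitting in the holding cell; nothing more to do.
//         },
//     }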
1070
1071 /// The return value of `monitor_updating_restored`
1072 pub(super) struct MonitorRestoreUpdates {
1073         pub raa: Option<msgs::RevokeAndACK>,
1074         pub commitment_update: Option<msgs::CommitmentUpdate>,
1075         pub order: RAACommitmentOrder,
1076         pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
1077         pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1078         pub finalized_claimed_htlcs: Vec<HTLCSource>,
1079         pub pending_update_adds: Vec<msgs::UpdateAddHTLC>,
1080         pub funding_broadcastable: Option<Transaction>,
1081         pub channel_ready: Option<msgs::ChannelReady>,
1082         pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
1083 }
1084
1085 /// The return value of `signer_maybe_unblocked`
1086 #[allow(unused)]
1087 pub(super) struct SignerResumeUpdates {
1088         pub commitment_update: Option<msgs::CommitmentUpdate>,
1089         pub funding_signed: Option<msgs::FundingSigned>,
1090         pub channel_ready: Option<msgs::ChannelReady>,
1091 }
1092
1093 /// The return value of `channel_reestablish`
1094 pub(super) struct ReestablishResponses {
1095         pub channel_ready: Option<msgs::ChannelReady>,
1096         pub raa: Option<msgs::RevokeAndACK>,
1097         pub commitment_update: Option<msgs::CommitmentUpdate>,
1098         pub order: RAACommitmentOrder,
1099         pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
1100         pub shutdown_msg: Option<msgs::Shutdown>,
1101 }
1102
1103 /// The result of a shutdown that should be handled.
1104 #[must_use]
1105 pub(crate) struct ShutdownResult {
1106         pub(crate) closure_reason: ClosureReason,
1107         /// A channel monitor update to apply.
1108         pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
1109         /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
1110         pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
1111         /// An unbroadcasted batch funding transaction id. The closure of this channel should be
1112         /// propagated to the remainder of the batch.
1113         pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
1114         pub(crate) channel_id: ChannelId,
1115         pub(crate) user_channel_id: u128,
1116         pub(crate) channel_capacity_satoshis: u64,
1117         pub(crate) counterparty_node_id: PublicKey,
1118         pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
1119         pub(crate) channel_funding_txo: Option<OutPoint>,
1120 }
1121
1122 /// Tracks the transaction number, along with current and next commitment points.
1123 /// This consolidates the logic to advance our commitment number and request new
1124 /// commitment points from our signer.
1125 #[derive(Debug, Copy, Clone)]
1126 enum HolderCommitmentPoint {
1127         // TODO: add a variant for before our first commitment point is retrieved
1128         /// We've advanced our commitment number and are waiting on the next commitment point.
1129         /// Until the `get_per_commitment_point` signer method becomes async, this variant
1130         /// will not be used.
1131         PendingNext { transaction_number: u64, current: PublicKey },
1132         /// Our current commitment point is ready, we've cached our next point,
1133         /// and we are not pending a new one.
1134         Available { transaction_number: u64, current: PublicKey, next: PublicKey },
1135 }
1136
1137 impl HolderCommitmentPoint {
1138         pub fn new<SP: Deref>(signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>) -> Self
1139                 where SP::Target: SignerProvider
1140         {
1141                 HolderCommitmentPoint::Available {
1142                         transaction_number: INITIAL_COMMITMENT_NUMBER,
1143                         current: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER, secp_ctx),
1144                         next: signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, secp_ctx),
1145                 }
1146         }
1147
1148         pub fn is_available(&self) -> bool {
1149                 matches!(self, HolderCommitmentPoint::Available { .. })
1150         }
1151
1152         pub fn transaction_number(&self) -> u64 {
1153                 match self {
1154                         HolderCommitmentPoint::PendingNext { transaction_number, .. } => *transaction_number,
1155                         HolderCommitmentPoint::Available { transaction_number, .. } => *transaction_number,
1156                 }
1157         }
1158
1159         pub fn current_point(&self) -> PublicKey {
1160                 match self {
1161                         HolderCommitmentPoint::PendingNext { current, .. } => *current,
1162                         HolderCommitmentPoint::Available { current, .. } => *current,
1163                 }
1164         }
1165
1166         pub fn next_point(&self) -> Option<PublicKey> {
1167                 match self {
1168                         HolderCommitmentPoint::PendingNext { .. } => None,
1169                         HolderCommitmentPoint::Available { next, .. } => Some(*next),
1170                 }
1171         }
1172
1173         pub fn advance<SP: Deref, L: Deref>(&mut self, signer: &ChannelSignerType<SP>, secp_ctx: &Secp256k1<secp256k1::All>, logger: &L)
1174                 where SP::Target: SignerProvider, L::Target: Logger
1175         {
1176                 if let HolderCommitmentPoint::Available { transaction_number, next, .. } = self {
1177                         *self = HolderCommitmentPoint::PendingNext {
1178                                 transaction_number: *transaction_number - 1,
1179                                 current: *next,
1180                         };
1181                 }
1182
1183                 if let HolderCommitmentPoint::PendingNext { transaction_number, current } = self {
1184                         let next = signer.as_ref().get_per_commitment_point(*transaction_number - 1, secp_ctx);
1185                         log_trace!(logger, "Retrieved next per-commitment point for commitment number {}", *transaction_number - 1);
1186                         *self = HolderCommitmentPoint::Available { transaction_number: *transaction_number, current: *current, next };
1187                 }
1188         }
1189 }
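// A usage sketch: the channel context holds one of these and advances it once per
// new holder commitment. With today's synchronous signer, `advance` passes through
// `PendingNext` and lands back in `Available` immediately; once
// `get_per_commitment_point` can be async, it may rest in `PendingNext` until the
// signer responds.
//
//     let mut point = HolderCommitmentPoint::new(&holder_signer, &secp_ctx);
//     assert!(point.is_available());
//     let before = point.transaction_number();
//     point.advance(&holder_signer, &secp_ctx, &logger);
//     assert_eq!(point.transaction_number(), before - 1);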
1190
1191 /// If the majority of the channel's funds are to the fundee and the initiator holds only just
1192 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
1193 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
1194 /// balance but the fundee is unable to send a payment as the increase in fee more than drains
1195 /// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless.
1196 /// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase
1197 /// by this multiple without hitting this case, before sending.
1198 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
1199 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
1200 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
1201 /// leave the channel less usable as we hold a bigger reserve.
1202 #[cfg(any(fuzzing, test))]
1203 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
1204 #[cfg(not(any(fuzzing, test)))]
1205 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
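// As a sketch, the headroom check this multiple buys us looks roughly like the
// following (hypothetical locals; the real accounting lives in the commit-tx fee
// helpers):
//
//     let buffered_feerate = feerate_per_kw as u64 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
//     // Only send the HTLC if our balance still covers the commitment-tx fee at
//     // `buffered_feerate` plus the channel reserve we must maintain.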
1206
1207 /// If, on an inbound channel, we fail to see the funding transaction confirmed on-chain within
1208 /// this many blocks after channel creation, we simply force-close and move on.
1209 /// This constant is the one suggested in BOLT 2.
1210 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
1211
1212 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
1213 /// not have enough balance remaining to cover the on-chain cost of this new
1214 /// HTLC's weight. If this happens, our counterparty fails the reception of our
1215 /// commitment_signed including this new HTLC, as it infringes on the channel
1216 /// reserve.
1217 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
1218 /// size 2. However, if the number of concurrent update_add_htlc is higher, this still
1219 /// leads to a channel force-close. Ultimately, this is an issue coming from the
1220 /// design of LN state machines, allowing asynchronous updates.
1221 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
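// Sketch: when computing an outbound update_fee we budget for this many extra
// counterparty-added HTLCs on the commitment transaction (illustrative call;
// `new_feerate_per_kw` and `pending_htlc_count` are hypothetical locals):
//
//     let fee_msat = commit_tx_fee_msat(
//         new_feerate_per_kw,
//         pending_htlc_count + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize,
//         &channel_type,
//     );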
1222
1223 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
1224 /// commitment transaction fees, with at least this many HTLCs present on the commitment
1225 /// transaction (not counting the value of the HTLCs themselves).
1226 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
1227
1228 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
1229 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
1230 /// ChannelUpdate prompted by the config update. This value was determined as follows:
1231 ///
1232 ///   * The expected interval between ticks (1 minute).
1233 ///   * The average convergence delay of updates across the network, i.e., ~300 seconds for a
1234 ///      node to see an update, per `<https://arxiv.org/pdf/2205.12737.pdf>`.
1235 ///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
1236 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
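// i.e., ~300 s convergence delay / 60 s per tick = 5 ticks.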
1237
1238 /// The number of ticks that may elapse while we're waiting for a response to a
1239 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
1240 /// them.
1241 ///
1242 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
1243 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
1244
1245 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
1246 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
1247 /// exceeding this age limit will be force-closed and purged from memory.
1248 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
1249
1250 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
1251 pub(crate) const COINBASE_MATURITY: u32 = 100;
1252
1253 struct PendingChannelMonitorUpdate {
1254         update: ChannelMonitorUpdate,
1255 }
1256
1257 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
1258         (0, update, required),
1259 });
1260
1261 /// The `ChannelPhase` enum describes the current phase in the life of a lightning channel, with
1262 /// each of its variants containing an appropriate channel struct.
1263 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
1264         UnfundedOutboundV1(OutboundV1Channel<SP>),
1265         UnfundedInboundV1(InboundV1Channel<SP>),
1266         #[cfg(any(dual_funding, splicing))]
1267         UnfundedOutboundV2(OutboundV2Channel<SP>),
1268         #[cfg(any(dual_funding, splicing))]
1269         UnfundedInboundV2(InboundV2Channel<SP>),
1270         Funded(Channel<SP>),
1271 }
1272
1273 impl<'a, SP: Deref> ChannelPhase<SP> where
1274         SP::Target: SignerProvider,
1275         <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
1276 {
1277         pub fn context(&'a self) -> &'a ChannelContext<SP> {
1278                 match self {
1279                         ChannelPhase::Funded(chan) => &chan.context,
1280                         ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
1281                         ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
1282                         #[cfg(any(dual_funding, splicing))]
1283                         ChannelPhase::UnfundedOutboundV2(chan) => &chan.context,
1284                         #[cfg(any(dual_funding, splicing))]
1285                         ChannelPhase::UnfundedInboundV2(chan) => &chan.context,
1286                 }
1287         }
1288
1289         pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
1290                 match self {
1291                         ChannelPhase::Funded(ref mut chan) => &mut chan.context,
1292                         ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
1293                         ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
1294                         #[cfg(any(dual_funding, splicing))]
1295                         ChannelPhase::UnfundedOutboundV2(ref mut chan) => &mut chan.context,
1296                         #[cfg(any(dual_funding, splicing))]
1297                         ChannelPhase::UnfundedInboundV2(ref mut chan) => &mut chan.context,
1298                 }
1299         }
1300 }
1301
1302 /// Contains all state common to unfunded inbound/outbound channels.
1303 pub(super) struct UnfundedChannelContext {
1304         /// A counter tracking how many ticks have elapsed since this unfunded channel was
1305         /// created. If this unfunded channel reaches `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS` ticks
1306         /// without being funded, it will be force-closed and purged from memory.
1307         ///
1308         /// This is so that we don't keep channels around that haven't progressed to a funded state
1309         /// in a timely manner.
1310         unfunded_channel_age_ticks: usize,
1311 }
1312
1313 impl UnfundedChannelContext {
1314         /// Determines whether we should force-close and purge this unfunded channel from memory due to it
1315         /// having reached the unfunded channel age limit.
1316         ///
1317         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
1318         pub fn should_expire_unfunded_channel(&mut self) -> bool {
1319                 self.unfunded_channel_age_ticks += 1;
1320                 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
1321         }
1322 }
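// Call-pattern sketch: this is invoked once per timer tick for each unfunded
// channel (the real loop lives in `ChannelManager::timer_tick_occurred`):
//
//     if unfunded_context.should_expire_unfunded_channel() {
//         // Force-close and drop the channel; it never progressed to a funded state.
//     }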
1323
1324 /// Contains everything about the channel including state, and various flags.
1325 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
1326         config: LegacyChannelConfig,
1327
1328         // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
1329         // constructed using it. The second element in the tuple corresponds to the number of ticks that
1330         // have elapsed since the update occurred.
1331         prev_config: Option<(ChannelConfig, usize)>,
1332
1333         inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
1334
1335         user_id: u128,
1336
1337         /// The current channel ID.
1338         channel_id: ChannelId,
1339         /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
1340         /// Will be `None` for channels created prior to 0.0.115.
1341         temporary_channel_id: Option<ChannelId>,
1342         channel_state: ChannelState,
1343
1344         // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
1345         // our peer. However, we want to make sure they received it, or else rebroadcast it when we
1346         // next connect.
1347         // We do so here, see `AnnouncementSigsState` for more details on the state(s).
1348         // Note that a number of our tests were written prior to the behavior here which retransmits
1349         // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
1350         // many tests.
1351         #[cfg(any(test, feature = "_test_utils"))]
1352         pub(crate) announcement_sigs_state: AnnouncementSigsState,
1353         #[cfg(not(any(test, feature = "_test_utils")))]
1354         announcement_sigs_state: AnnouncementSigsState,
1355
1356         secp_ctx: Secp256k1<secp256k1::All>,
1357         channel_value_satoshis: u64,
1358
1359         latest_monitor_update_id: u64,
1360
1361         holder_signer: ChannelSignerType<SP>,
1362         shutdown_scriptpubkey: Option<ShutdownScript>,
1363         destination_script: ScriptBuf,
1364
1365         // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
1366         // generation start at 0 and count up...this simplifies some parts of implementation at the
1367         // cost of others, but should really just be changed.
1368
1369         holder_commitment_point: HolderCommitmentPoint,
1370         cur_holder_commitment_transaction_number: u64,
1371         cur_counterparty_commitment_transaction_number: u64,
1372         value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
1373         pending_inbound_htlcs: Vec<InboundHTLCOutput>,
1374         pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
1375         holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1376
1377         /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1378         /// need to ensure we resend them in the order we originally generated them. Note that because
1379         /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1380         /// sufficient to simply set this to the opposite of any message we are generating as we
1381         /// generate it. ie when we generate a CS, we set this to RAAFirst as, if there is a pending
1382         /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
1383         /// send it first.
1384         resend_order: RAACommitmentOrder,
1385
1386         monitor_pending_channel_ready: bool,
1387         monitor_pending_revoke_and_ack: bool,
1388         monitor_pending_commitment_signed: bool,
1389
1390         // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1391         // responsible for some of the HTLCs here or not - we don't know whether the update in question
1392         // completed or not. We currently ignore these fields entirely when force-closing a channel,
1393         // but need to handle this somehow or we run the risk of losing HTLCs!
1394         monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1395         monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1396         monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1397         monitor_pending_update_adds: Vec<msgs::UpdateAddHTLC>,
1398
1399         /// If we went to send a commitment update (ie some messages then [`msgs::CommitmentSigned`])
1400         /// but our signer (initially) refused to give us a signature, we should retry at some point in
1401         /// the future when the signer indicates it may have a signature for us.
1402         ///
1403         /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1404         /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1405         signer_pending_commitment_update: bool,
1406         /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1407         /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1408         /// outbound or inbound.
1409         signer_pending_funding: bool,
1410
1411         // pending_update_fee is filled when sending and receiving update_fee.
1412         //
1413         // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1414         // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1415         // generating new commitment transactions with exactly the same criteria as inbound/outbound
1416         // HTLCs with similar state.
1417         pending_update_fee: Option<(u32, FeeUpdateState)>,
1418         // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1419         // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1420         // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1421         // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1422         // further `send_update_fee` calls, dropping the previous holding cell update entirely.
1423         holding_cell_update_fee: Option<u32>,
1424         next_holder_htlc_id: u64,
1425         next_counterparty_htlc_id: u64,
1426         feerate_per_kw: u32,
1427
1428         /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1429         /// when the channel is updated in ways which may impact the `channel_update` message or when a
1430         /// new block is received, ensuring it's always at least moderately close to the current real
1431         /// time.
1432         update_time_counter: u32,
1433
1434         #[cfg(debug_assertions)]
1435         /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1436         holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1437         #[cfg(debug_assertions)]
1438         /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1439         counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1440
1441         last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1442         target_closing_feerate_sats_per_kw: Option<u32>,
1443
1444         /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1445         /// update, we need to delay processing it until later. We do that here by simply storing the
1446         /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1447         pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1448
1449         /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1450         /// transaction. These are set once we reach `closing_negotiation_ready`.
1451         #[cfg(test)]
1452         pub(crate) closing_fee_limits: Option<(u64, u64)>,
1453         #[cfg(not(test))]
1454         closing_fee_limits: Option<(u64, u64)>,
1455
1456         /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1457         /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1458         /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1459         /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1460         /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1461         ///
1462         /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1463         /// until we see a `commitment_signed` before doing so.
1464         ///
1465         /// We don't bother to persist this - we anticipate this state won't last longer than a few
1466         /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1467         expecting_peer_commitment_signed: bool,
1468
1469         /// The hash of the block in which the funding transaction was included.
1470         funding_tx_confirmed_in: Option<BlockHash>,
1471         funding_tx_confirmation_height: u32,
1472         short_channel_id: Option<u64>,
1473         /// Either the height at which this channel was created or the height at which it was last
1474         /// serialized if it was serialized by versions prior to 0.0.103.
1475         /// We use this to close if funding is never broadcasted.
1476         pub(super) channel_creation_height: u32,
1477
1478         counterparty_dust_limit_satoshis: u64,
1479
1480         #[cfg(test)]
1481         pub(super) holder_dust_limit_satoshis: u64,
1482         #[cfg(not(test))]
1483         holder_dust_limit_satoshis: u64,
1484
1485         #[cfg(test)]
1486         pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
1487         #[cfg(not(test))]
1488         counterparty_max_htlc_value_in_flight_msat: u64,
1489
1490         #[cfg(test)]
1491         pub(super) holder_max_htlc_value_in_flight_msat: u64,
1492         #[cfg(not(test))]
1493         holder_max_htlc_value_in_flight_msat: u64,
1494
1495         /// The minimum channel reserve we must maintain - set by our counterparty.
1496         counterparty_selected_channel_reserve_satoshis: Option<u64>,
1497
1498         #[cfg(test)]
1499         pub(super) holder_selected_channel_reserve_satoshis: u64,
1500         #[cfg(not(test))]
1501         holder_selected_channel_reserve_satoshis: u64,
1502
1503         counterparty_htlc_minimum_msat: u64,
1504         holder_htlc_minimum_msat: u64,
1505         #[cfg(test)]
1506         pub counterparty_max_accepted_htlcs: u16,
1507         #[cfg(not(test))]
1508         counterparty_max_accepted_htlcs: u16,
1509         holder_max_accepted_htlcs: u16,
1510         minimum_depth: Option<u32>,
1511
1512         counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1513
1514         pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1515         funding_transaction: Option<Transaction>,
1516         is_batch_funding: Option<()>,
1517
1518         counterparty_cur_commitment_point: Option<PublicKey>,
1519         counterparty_prev_commitment_point: Option<PublicKey>,
1520         counterparty_node_id: PublicKey,
1521
1522         counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1523
1524         commitment_secrets: CounterpartyCommitmentSecrets,
1525
1526         channel_update_status: ChannelUpdateStatus,
1527         /// Once we reach `closing_negotiation_ready`, we set this, indicating that if closing_signed
1528         /// does not complete within a single timer tick (one minute), we should force-close the channel.
1529         /// This prevents us from keeping unusable channels around forever if our counterparty wishes
1530         /// to DoS us.
1531         /// Note that this field is reset to false on deserialization to give us a chance to connect to
1532         /// our peer and start the closing_signed negotiation fresh.
1533         closing_signed_in_flight: bool,
1534
1535         /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1536         /// This can be used to rebroadcast the channel_announcement message later.
1537         announcement_sigs: Option<(Signature, Signature)>,
1538
1539         // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1540         // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
1541         // be, by comparing the cached values to the fee of the transaction generated by
1542         // `build_commitment_transaction`.
1543         #[cfg(any(test, fuzzing))]
1544         next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1545         #[cfg(any(test, fuzzing))]
1546         next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1547
1548         /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1549         /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1550         /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1551         /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1552         /// message until we receive a channel_reestablish.
1553         ///
1554         /// See-also <https://github.com/lightningnetwork/lnd/issues/4006>
1555         pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1556
1557         /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1558         /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1559         /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1560         /// unblock the state machine.
1561         ///
1562         /// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
1563         /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1564         /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1565         ///
1566         /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1567         /// [`msgs::RevokeAndACK`] message from the counterparty.
1568         sent_message_awaiting_response: Option<usize>,
1569
1570         #[cfg(any(test, fuzzing))]
1571         // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1572         // corresponding HTLC on the inbound path. If, then, the outbound path channel is
1573         // disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
1574         // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1575         // is fine, but as a sanity check in our failure to generate the second claim, we check here
1576         // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1577         historical_inbound_htlc_fulfills: HashSet<u64>,
1578
1579         /// This channel's type, as negotiated during channel open
1580         channel_type: ChannelTypeFeatures,
1581
1582         // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1583         // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1584         // the channel's funding UTXO.
1585         //
1586         // We also use this when sending our peer a channel_update that isn't to be broadcasted
1587         // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1588         // associated channel mapping.
1589         //
1590         // We only bother storing the most recent SCID alias at any time, though our counterparty has
1591         // to store all of them.
1592         latest_inbound_scid_alias: Option<u64>,
1593
1594         // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1595         // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1596         // don't currently support node id aliases and eventually privacy should be provided with
1597         // blinded paths instead of simple scid+node_id aliases.
1598         outbound_scid_alias: u64,
1599
1600         // We track whether we already emitted a `ChannelPending` event.
1601         channel_pending_event_emitted: bool,
1602
1603         // We track whether we already emitted a `ChannelReady` event.
1604         channel_ready_event_emitted: bool,
1605
1606         /// `Some` if we initiated the shutdown of this channel.
1607         local_initiated_shutdown: Option<()>,
1608
1609         /// The unique identifier used to re-derive the private key material for the channel through
1610         /// [`SignerProvider::derive_channel_signer`].
1611         #[cfg(not(test))]
1612         channel_keys_id: [u8; 32],
1613         #[cfg(test)]
1614         pub channel_keys_id: [u8; 32],
1615
1616         /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1617         /// store it here and only release it to the `ChannelManager` once it asks for it.
1618         blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1619 }
1620
1621 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
1622         fn new_for_inbound_channel<'a, ES: Deref, F: Deref, L: Deref>(
1623                 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1624                 entropy_source: &'a ES,
1625                 signer_provider: &'a SP,
1626                 counterparty_node_id: PublicKey,
1627                 their_features: &'a InitFeatures,
1628                 user_id: u128,
1629                 config: &'a UserConfig,
1630                 current_chain_height: u32,
1631                 logger: &'a L,
1632                 is_0conf: bool,
1633                 our_funding_satoshis: u64,
1634                 counterparty_pubkeys: ChannelPublicKeys,
1635                 channel_type: ChannelTypeFeatures,
1636                 holder_selected_channel_reserve_satoshis: u64,
1637                 msg_channel_reserve_satoshis: u64,
1638                 msg_push_msat: u64,
1639                 open_channel_fields: msgs::CommonOpenChannelFields,
1640         ) -> Result<ChannelContext<SP>, ChannelError>
1641                 where
1642                         ES::Target: EntropySource,
1643                         F::Target: FeeEstimator,
1644                         L::Target: Logger,
1645                         SP::Target: SignerProvider,
1646         {
1647                 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id), None);
1648                 let announced_channel = (open_channel_fields.channel_flags & 1) == 1;
1649
1650                 let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis);
1651
1652                 let channel_keys_id = signer_provider.generate_channel_keys_id(true, channel_value_satoshis, user_id);
1653                 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
1654                 let pubkeys = holder_signer.pubkeys().clone();
1655
1656                 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
1657                         return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be at least {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
1658                 }
1659
1660                 // Check sanity of message fields:
1661                 if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis {
1662                         return Err(ChannelError::Close(format!(
1663                                 "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}",
1664                                 config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis,
1665                                 open_channel_fields.funding_satoshis, our_funding_satoshis)));
1666                 }
1667                 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1668                         return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
1669                 }
1670                 if msg_channel_reserve_satoshis > channel_value_satoshis {
1671                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
1672                 }
1673                 let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000;
1674                 if msg_push_msat > full_channel_value_msat {
1675                         return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
1676                 }
1677                 if open_channel_fields.dust_limit_satoshis > channel_value_satoshis {
1678                         return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
1679                 }
1680                 if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat {
1681                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
1682                 }
1683                 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
1684
1685                 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
1686                 if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay {
1687                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
1688                 }
1689                 if open_channel_fields.max_accepted_htlcs < 1 {
1690                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
1691                 }
1692                 if open_channel_fields.max_accepted_htlcs > MAX_HTLCS {
1693                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
1694                 }
1695
1696                 // Now check against optional parameters as set by config...
1697                 if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis {
1698                         return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
1699                 }
1700                 if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
1701                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
1702                 }
1703                 if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
1704                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
1705                 }
1706                 if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
1707                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
1708                 }
1709                 if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
1710                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
1711                 }
1712                 if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1713                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1714                 }
1715                 if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
1716                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
1717                 }
1718
1719                 // Convert things into internal flags and prep our state:
1720
1721                 if config.channel_handshake_limits.force_announced_channel_preference {
1722                         if config.channel_handshake_config.announced_channel != announced_channel {
1723                                 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
1724                         }
1725                 }
1726
1727                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1728                         // Protocol-level safety check; this should never trip because of
1729                         // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
1730                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1731                 }
1732                 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
1733                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
1734                 }
1735                 if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1736                         log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
1737                                 msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
1738                 }
1739                 if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis {
1740                         return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
1741                 }
1742
1743                 // check if the funder's amount for the initial commitment tx is sufficient
1744                 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
1745                 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
1746                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2
1747                 } else {
1748                         0
1749                 };
1750                 let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat;
1751                 let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
1752                 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
1753                         return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay fee for initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
1754                 }
1755
1756                 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
1757                 // While it's reasonable for us to not meet the channel reserve initially (if they don't
1758                 // want to push much to us), our counterparty should always have more than our reserve.
1759                 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
1760                         return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
1761                 }
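                // Worked example with illustrative numbers (not drawn from any real channel):
                // funding_satoshis = 100_000 and push_msat = 20_000_000 give
                // funders_amount_msat = 80_000_000, i.e. 80_000 sats. At a commitment feerate of
                // 2_500 sat/kW with no anchors, the fee reserved above for MIN_AFFORDABLE_HTLC_COUNT
                // HTLC slots is only a few thousand sats, so the fee check passes and the remaining
                // to_remote balance must still clear `holder_selected_channel_reserve_satoshis`.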
1762
1763                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
1764                         match &open_channel_fields.shutdown_scriptpubkey {
1765                                 &Some(ref script) => {
1766                                         // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
1767                                         if script.len() == 0 {
1768                                                 None
1769                                         } else {
1770                                                 if !script::is_bolt2_compliant(&script, their_features) {
1771                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
1772                                                 }
1773                                                 Some(script.clone())
1774                                         }
1775                                 },
1776                                 // Peer is signaling upfront_shutdown but didn't opt out via the correct mechanism (a 0-length script). Peer looks buggy, so we fail the channel
1777                                 &None => {
1778                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we did not receive any script. Use a 0-length script to opt out".to_owned()));
1779                                 }
1780                         }
1781                 } else { None };
1782
1783                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1784                         match signer_provider.get_shutdown_scriptpubkey() {
1785                                 Ok(scriptpubkey) => Some(scriptpubkey),
1786                                 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
1787                         }
1788                 } else { None };
1789
1790                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1791                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
1792                                 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
1793                         }
1794                 }
1795
1796                 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1797                         Ok(script) => script,
1798                         Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
1799                 };
1800
1801                 let mut secp_ctx = Secp256k1::new();
1802                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1803
1804                 let minimum_depth = if is_0conf {
1805                         Some(0)
1806                 } else {
1807                         Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
1808                 };
1809
1810                 let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat;
1811
1812                 let holder_signer = ChannelSignerType::Ecdsa(holder_signer);
1813                 let holder_commitment_point = HolderCommitmentPoint::new(&holder_signer, &secp_ctx);
1814
1815                 // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`?
1816
1817                 let channel_context = ChannelContext {
1818                         user_id,
1819
1820                         config: LegacyChannelConfig {
1821                                 options: config.channel_config.clone(),
1822                                 announced_channel,
1823                                 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1824                         },
1825
1826                         prev_config: None,
1827
1828                         inbound_handshake_limits_override: None,
1829
1830                         temporary_channel_id: Some(open_channel_fields.temporary_channel_id),
1831                         channel_id: open_channel_fields.temporary_channel_id,
1832                         channel_state: ChannelState::NegotiatingFunding(
1833                                 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
1834                         ),
1835                         announcement_sigs_state: AnnouncementSigsState::NotSent,
1836                         secp_ctx,
1837
1838                         latest_monitor_update_id: 0,
1839
1840                         holder_signer,
1841                         shutdown_scriptpubkey,
1842                         destination_script,
1843
1844                         holder_commitment_point,
1845                         cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1846                         cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1847                         value_to_self_msat,
1848
1849                         pending_inbound_htlcs: Vec::new(),
1850                         pending_outbound_htlcs: Vec::new(),
1851                         holding_cell_htlc_updates: Vec::new(),
1852                         pending_update_fee: None,
1853                         holding_cell_update_fee: None,
1854                         next_holder_htlc_id: 0,
1855                         next_counterparty_htlc_id: 0,
1856                         update_time_counter: 1,
1857
1858                         resend_order: RAACommitmentOrder::CommitmentFirst,
1859
1860                         monitor_pending_channel_ready: false,
1861                         monitor_pending_revoke_and_ack: false,
1862                         monitor_pending_commitment_signed: false,
1863                         monitor_pending_forwards: Vec::new(),
1864                         monitor_pending_failures: Vec::new(),
1865                         monitor_pending_finalized_fulfills: Vec::new(),
1866                         monitor_pending_update_adds: Vec::new(),
1867
1868                         signer_pending_commitment_update: false,
1869                         signer_pending_funding: false,
1870
1872                         #[cfg(debug_assertions)]
1873                         holder_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1874                         #[cfg(debug_assertions)]
1875                         counterparty_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1876
1877                         last_sent_closing_fee: None,
1878                         pending_counterparty_closing_signed: None,
1879                         expecting_peer_commitment_signed: false,
1880                         closing_fee_limits: None,
1881                         target_closing_feerate_sats_per_kw: None,
1882
1883                         funding_tx_confirmed_in: None,
1884                         funding_tx_confirmation_height: 0,
1885                         short_channel_id: None,
1886                         channel_creation_height: current_chain_height,
1887
1888                         feerate_per_kw: open_channel_fields.commitment_feerate_sat_per_1000_weight,
1889                         channel_value_satoshis,
1890                         counterparty_dust_limit_satoshis: open_channel_fields.dust_limit_satoshis,
1891                         holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
1892                         counterparty_max_htlc_value_in_flight_msat: cmp::min(open_channel_fields.max_htlc_value_in_flight_msat, channel_value_satoshis * 1000),
1893                         holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
1894                         counterparty_selected_channel_reserve_satoshis: Some(msg_channel_reserve_satoshis),
1895                         holder_selected_channel_reserve_satoshis,
1896                         counterparty_htlc_minimum_msat: open_channel_fields.htlc_minimum_msat,
1897                         holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
1898                         counterparty_max_accepted_htlcs: open_channel_fields.max_accepted_htlcs,
1899                         holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
1900                         minimum_depth,
1901
1902                         counterparty_forwarding_info: None,
1903
1904                         channel_transaction_parameters: ChannelTransactionParameters {
1905                                 holder_pubkeys: pubkeys,
1906                                 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
1907                                 is_outbound_from_holder: false,
1908                                 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
1909                                         selected_contest_delay: open_channel_fields.to_self_delay,
1910                                         pubkeys: counterparty_pubkeys,
1911                                 }),
1912                                 funding_outpoint: None,
1913                                 channel_type_features: channel_type.clone()
1914                         },
1915                         funding_transaction: None,
1916                         is_batch_funding: None,
1917
1918                         counterparty_cur_commitment_point: Some(open_channel_fields.first_per_commitment_point),
1919                         counterparty_prev_commitment_point: None,
1920                         counterparty_node_id,
1921
1922                         counterparty_shutdown_scriptpubkey,
1923
1924                         commitment_secrets: CounterpartyCommitmentSecrets::new(),
1925
1926                         channel_update_status: ChannelUpdateStatus::Enabled,
1927                         closing_signed_in_flight: false,
1928
1929                         announcement_sigs: None,
1930
1931                         #[cfg(any(test, fuzzing))]
1932                         next_local_commitment_tx_fee_info_cached: Mutex::new(None),
1933                         #[cfg(any(test, fuzzing))]
1934                         next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
1935
1936                         workaround_lnd_bug_4006: None,
1937                         sent_message_awaiting_response: None,
1938
1939                         latest_inbound_scid_alias: None,
1940                         outbound_scid_alias: 0,
1941
1942                         channel_pending_event_emitted: false,
1943                         channel_ready_event_emitted: false,
1944
1945                         #[cfg(any(test, fuzzing))]
1946                         historical_inbound_htlc_fulfills: new_hash_set(),
1947
1948                         channel_type,
1949                         channel_keys_id,
1950
1951                         local_initiated_shutdown: None,
1952
1953                         blocked_monitor_updates: Vec::new(),
1954                 };
1955
1956                 Ok(channel_context)
1957         }
1958
1959         fn new_for_outbound_channel<'a, ES: Deref, F: Deref>(
1960                 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1961                 entropy_source: &'a ES,
1962                 signer_provider: &'a SP,
1963                 counterparty_node_id: PublicKey,
1964                 their_features: &'a InitFeatures,
1965                 funding_satoshis: u64,
1966                 push_msat: u64,
1967                 user_id: u128,
1968                 config: &'a UserConfig,
1969                 current_chain_height: u32,
1970                 outbound_scid_alias: u64,
1971                 temporary_channel_id: Option<ChannelId>,
1972                 holder_selected_channel_reserve_satoshis: u64,
1973                 channel_keys_id: [u8; 32],
1974                 holder_signer: <SP::Target as SignerProvider>::EcdsaSigner,
1975                 pubkeys: ChannelPublicKeys,
1976         ) -> Result<ChannelContext<SP>, APIError>
1977                 where
1978                         ES::Target: EntropySource,
1979                         F::Target: FeeEstimator,
1980                         SP::Target: SignerProvider,
1981         {
1982                 // This will be updated with the counterparty contribution if this is a dual-funded channel
1983                 let channel_value_satoshis = funding_satoshis;
1984
1985                 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
1986
1987                 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
1988                         return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
1989                 }
1990                 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1991                         return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
1992                 }
1993                 let channel_value_msat = channel_value_satoshis * 1000;
1994                 if push_msat > channel_value_msat {
1995                         return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
1996                 }
1997                 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
1998                         return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
1999                 }
2000
2001                 let channel_type = get_initial_channel_type(&config, their_features);
2002                 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
2003
2004                 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
2005                         (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
2006                 } else {
2007                         (ConfirmationTarget::NonAnchorChannelFee, 0)
2008                 };
2009                 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
2010
2011                 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
2012                 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
2013                 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
2014                         return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
2015                 }
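                // Illustrative arithmetic (hypothetical numbers, not from the original source): if
                // `commitment_feerate` were 2_500 sat/kW and `commit_tx_fee` worked out to 1_000_000 msat,
                // then any combination of `funding_satoshis`/`push_msat` leaving `value_to_self_msat`
                // (less any anchor output value) below 1_000_000 msat would be rejected by the check
                // above, since the funder must always be able to pay the commitment-transaction fee.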
2016
2017                 let mut secp_ctx = Secp256k1::new();
2018                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
2019
2020                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
2021                         match signer_provider.get_shutdown_scriptpubkey() {
2022                                 Ok(scriptpubkey) => Some(scriptpubkey),
2023                                 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
2024                         }
2025                 } else { None };
2026
2027                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
2028                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
2029                                 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
2030                         }
2031                 }
2032
2033                 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
2034                         Ok(script) => script,
2035                         Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
2036                 };
2037
2038                 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
2039
2040                 let holder_signer = ChannelSignerType::Ecdsa(holder_signer);
2041                 let holder_commitment_point = HolderCommitmentPoint::new(&holder_signer, &secp_ctx);
2042
2043                 Ok(Self {
2044                         user_id,
2045
2046                         config: LegacyChannelConfig {
2047                                 options: config.channel_config.clone(),
2048                                 announced_channel: config.channel_handshake_config.announced_channel,
2049                                 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
2050                         },
2051
2052                         prev_config: None,
2053
2054                         inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
2055
2056                         channel_id: temporary_channel_id,
2057                         temporary_channel_id: Some(temporary_channel_id),
2058                         channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
2059                         announcement_sigs_state: AnnouncementSigsState::NotSent,
2060                         secp_ctx,
2061                         // We'll add our counterparty's `funding_satoshis` when we receive `accept_channel2`.
2062                         channel_value_satoshis,
2063
2064                         latest_monitor_update_id: 0,
2065
2066                         holder_signer,
2067                         shutdown_scriptpubkey,
2068                         destination_script,
2069
2070                         holder_commitment_point,
2071                         cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
2072                         cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
2073                         value_to_self_msat,
2074
2075                         pending_inbound_htlcs: Vec::new(),
2076                         pending_outbound_htlcs: Vec::new(),
2077                         holding_cell_htlc_updates: Vec::new(),
2078                         pending_update_fee: None,
2079                         holding_cell_update_fee: None,
2080                         next_holder_htlc_id: 0,
2081                         next_counterparty_htlc_id: 0,
2082                         update_time_counter: 1,
2083
2084                         resend_order: RAACommitmentOrder::CommitmentFirst,
2085
2086                         monitor_pending_channel_ready: false,
2087                         monitor_pending_revoke_and_ack: false,
2088                         monitor_pending_commitment_signed: false,
2089                         monitor_pending_forwards: Vec::new(),
2090                         monitor_pending_failures: Vec::new(),
2091                         monitor_pending_finalized_fulfills: Vec::new(),
2092                         monitor_pending_update_adds: Vec::new(),
2093
2094                         signer_pending_commitment_update: false,
2095                         signer_pending_funding: false,
2096
2097                         // We'll add our counterparty's `funding_satoshis` to these max commitment output assertions
2098                         // when we receive `accept_channel2`.
2099                         #[cfg(debug_assertions)]
2100                         holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
2101                         #[cfg(debug_assertions)]
2102                         counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
2103
2104                         last_sent_closing_fee: None,
2105                         pending_counterparty_closing_signed: None,
2106                         expecting_peer_commitment_signed: false,
2107                         closing_fee_limits: None,
2108                         target_closing_feerate_sats_per_kw: None,
2109
2110                         funding_tx_confirmed_in: None,
2111                         funding_tx_confirmation_height: 0,
2112                         short_channel_id: None,
2113                         channel_creation_height: current_chain_height,
2114
2115                         feerate_per_kw: commitment_feerate,
2116                         counterparty_dust_limit_satoshis: 0,
2117                         holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
2118                         counterparty_max_htlc_value_in_flight_msat: 0,
2119                         // We'll adjust this to include our counterparty's `funding_satoshis` when we
2120                         // receive `accept_channel2`.
2121                         holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
2122                         counterparty_selected_channel_reserve_satoshis: None, // Filled in when we receive accept_channel
2123                         holder_selected_channel_reserve_satoshis,
2124                         counterparty_htlc_minimum_msat: 0,
2125                         holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
2126                         counterparty_max_accepted_htlcs: 0,
2127                         holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
2128                         minimum_depth: None, // Filled in when we receive accept_channel
2129
2130                         counterparty_forwarding_info: None,
2131
2132                         channel_transaction_parameters: ChannelTransactionParameters {
2133                                 holder_pubkeys: pubkeys,
2134                                 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
2135                                 is_outbound_from_holder: true,
2136                                 counterparty_parameters: None,
2137                                 funding_outpoint: None,
2138                                 channel_type_features: channel_type.clone()
2139                         },
2140                         funding_transaction: None,
2141                         is_batch_funding: None,
2142
2143                         counterparty_cur_commitment_point: None,
2144                         counterparty_prev_commitment_point: None,
2145                         counterparty_node_id,
2146
2147                         counterparty_shutdown_scriptpubkey: None,
2148
2149                         commitment_secrets: CounterpartyCommitmentSecrets::new(),
2150
2151                         channel_update_status: ChannelUpdateStatus::Enabled,
2152                         closing_signed_in_flight: false,
2153
2154                         announcement_sigs: None,
2155
2156                         #[cfg(any(test, fuzzing))]
2157                         next_local_commitment_tx_fee_info_cached: Mutex::new(None),
2158                         #[cfg(any(test, fuzzing))]
2159                         next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
2160
2161                         workaround_lnd_bug_4006: None,
2162                         sent_message_awaiting_response: None,
2163
2164                         latest_inbound_scid_alias: None,
2165                         outbound_scid_alias,
2166
2167                         channel_pending_event_emitted: false,
2168                         channel_ready_event_emitted: false,
2169
2170                         #[cfg(any(test, fuzzing))]
2171                         historical_inbound_htlc_fulfills: new_hash_set(),
2172
2173                         channel_type,
2174                         channel_keys_id,
2175
2176                         blocked_monitor_updates: Vec::new(),
2177                         local_initiated_shutdown: None,
2178                 })
2179         }
2180
2181         /// Allowed in any state (including after shutdown)
2182         pub fn get_update_time_counter(&self) -> u32 {
2183                 self.update_time_counter
2184         }
2185
2186         pub fn get_latest_monitor_update_id(&self) -> u64 {
2187                 self.latest_monitor_update_id
2188         }
2189
2190         pub fn should_announce(&self) -> bool {
2191                 self.config.announced_channel
2192         }
2193
2194         pub fn is_outbound(&self) -> bool {
2195                 self.channel_transaction_parameters.is_outbound_from_holder
2196         }
2197
2198         /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
2199         /// Allowed in any state (including after shutdown)
2200         pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
2201                 self.config.options.forwarding_fee_base_msat
2202         }
2203
2204         /// Returns true if we've ever received a message from the remote end for this Channel
2205         pub fn have_received_message(&self) -> bool {
2206                 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
2207         }
2208
2209         /// Returns true if this channel is fully established and not known to be closing.
2210         /// Allowed in any state (including after shutdown)
2211         pub fn is_usable(&self) -> bool {
2212                 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
2213                         !self.channel_state.is_local_shutdown_sent() &&
2214                         !self.channel_state.is_remote_shutdown_sent() &&
2215                         !self.monitor_pending_channel_ready
2216         }
2217
2218         /// Returns the state of the channel as it progresses through the various stages of shutdown.
2219         pub fn shutdown_state(&self) -> ChannelShutdownState {
2220                 match self.channel_state {
2221                         ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
2222                                 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
2223                                         ChannelShutdownState::ShutdownInitiated
2224                                 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
2225                                         ChannelShutdownState::ResolvingHTLCs
2226                                 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
2227                                         ChannelShutdownState::NegotiatingClosingFee
2228                                 } else {
2229                                         ChannelShutdownState::NotShuttingDown
2230                                 },
2231                         ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
2232                         _ => ChannelShutdownState::NotShuttingDown,
2233                 }
2234         }
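        // Illustrative mapping of the match arms above, for a channel in ChannelReady:
        //  - local shutdown sent, remote not yet            => ShutdownInitiated
        //  - either shutdown sent, HTLCs/fee update pending => ResolvingHTLCs
        //  - either shutdown sent, nothing left to resolve  => NegotiatingClosingFee
        //  - no shutdown sent by either side                => NotShuttingDown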
2235
2236         fn closing_negotiation_ready(&self) -> bool {
2237                 let is_ready_to_close = match self.channel_state {
2238                         ChannelState::AwaitingChannelReady(flags) =>
2239                                 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
2240                         ChannelState::ChannelReady(flags) =>
2241                                 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
2242                         _ => false,
2243                 };
2244                 self.pending_inbound_htlcs.is_empty() &&
2245                         self.pending_outbound_htlcs.is_empty() &&
2246                         self.pending_update_fee.is_none() &&
2247                         is_ready_to_close
2248         }
2249
2250         /// Returns true if this channel is currently available for use. This performs a superset of
2251         /// the checks in is_usable(), additionally requiring that the peer is currently connected.
2252         /// Allowed in any state (including after shutdown)
2253         pub fn is_live(&self) -> bool {
2254                 self.is_usable() && !self.channel_state.is_peer_disconnected()
2255         }
2256
2257         // Public utilities:
2258
2259         pub fn channel_id(&self) -> ChannelId {
2260                 self.channel_id
2261         }
2262
2263         /// Returns the `temporary_channel_id` used during channel establishment.
2264         ///
2265         /// Will return `None` for channels created prior to LDK version 0.0.115.
2266         pub fn temporary_channel_id(&self) -> Option<ChannelId> {
2267                 self.temporary_channel_id
2268         }
2269
2270         pub fn minimum_depth(&self) -> Option<u32> {
2271                 self.minimum_depth
2272         }
2273
2274         /// Gets the "user_id" value passed into the construction of this channel. It has no special
2275         /// meaning and exists only to allow users to have a persistent identifier of a channel.
2276         pub fn get_user_id(&self) -> u128 {
2277                 self.user_id
2278         }
2279
2280         /// Gets the channel's type
2281         pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
2282                 &self.channel_type
2283         }
2284
2285         /// Gets the channel's `short_channel_id`.
2286         ///
2287         /// Will return `None` if the channel hasn't been confirmed yet.
2288         pub fn get_short_channel_id(&self) -> Option<u64> {
2289                 self.short_channel_id
2290         }
2291
2292         /// Allowed in any state (including after shutdown)
2293         pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
2294                 self.latest_inbound_scid_alias
2295         }
2296
2297         /// Allowed in any state (including after shutdown)
2298         pub fn outbound_scid_alias(&self) -> u64 {
2299                 self.outbound_scid_alias
2300         }
2301
2302         /// Returns the holder signer for this channel.
2303         #[cfg(test)]
2304         pub fn get_signer(&self) -> &ChannelSignerType<SP> {
2305                 &self.holder_signer
2306         }
2307
2308         /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0
2309         /// (indicating we were written by LDK prior to 0.0.106, which did not set outbound SCID
2310         /// aliases), or prior to any channel actions during `Channel` initialization.
2311         pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
2312                 debug_assert_eq!(self.outbound_scid_alias, 0);
2313                 self.outbound_scid_alias = outbound_scid_alias;
2314         }
2315
2316         /// Returns the funding_txo we either got from our peer, or were given by
2317         /// get_funding_created.
2318         pub fn get_funding_txo(&self) -> Option<OutPoint> {
2319                 self.channel_transaction_parameters.funding_outpoint
2320         }
2321
2322         /// Returns the height in which our funding transaction was confirmed.
2323         pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
2324                 let conf_height = self.funding_tx_confirmation_height;
2325                 if conf_height > 0 {
2326                         Some(conf_height)
2327                 } else {
2328                         None
2329                 }
2330         }
2331
2332         /// Returns the block hash in which our funding transaction was confirmed.
2333         pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
2334                 self.funding_tx_confirmed_in
2335         }
2336
2337         /// Returns the current number of confirmations on the funding transaction.
2338         pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
2339                 if self.funding_tx_confirmation_height == 0 {
2340                         // We either haven't seen any confirmation yet, or observed a reorg.
2341                         return 0;
2342                 }
2343
2344                 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
2345         }
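        // Worked example (hypothetical heights): if the funding transaction confirmed at height
        // 100, then at a chain tip of 100 this returns 1 (the confirming block itself counts),
        // at height 105 it returns 6, and at height 99 (i.e. after a reorg below the recorded
        // confirmation height) the checked_sub fails and we report 0 confirmations.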
2346
2347         fn get_holder_selected_contest_delay(&self) -> u16 {
2348                 self.channel_transaction_parameters.holder_selected_contest_delay
2349         }
2350
2351         fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
2352                 &self.channel_transaction_parameters.holder_pubkeys
2353         }
2354
2355         pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
2356                 self.channel_transaction_parameters.counterparty_parameters
2357                         .as_ref().map(|params| params.selected_contest_delay)
2358         }
2359
2360         fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
2361                 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
2362         }
2363
2364         /// Allowed in any state (including after shutdown)
2365         pub fn get_counterparty_node_id(&self) -> PublicKey {
2366                 self.counterparty_node_id
2367         }
2368
2369         /// Allowed in any state (including after shutdown)
2370         pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
2371                 self.holder_htlc_minimum_msat
2372         }
2373
2374         /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
2375         pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
2376                 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
2377         }
2378
2379         /// Allowed in any state (including after shutdown)
2380         pub fn get_announced_htlc_max_msat(&self) -> u64 {
2381                 cmp::min(
2382                         // Upper bound by capacity, kept a bit below the full amount to discourage attempts to
2383                         // use the full capacity. This is an effort to reduce routing failures, because in many
2384                         // cases the channel might have been used to route very small values (either by honest users or as DoS).
2385                         self.channel_value_satoshis * 1000 * 9 / 10,
2386
2387                         self.counterparty_max_htlc_value_in_flight_msat
2388                 )
2389         }
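        // Worked example (hypothetical values): for a 1_000_000 sat channel the capacity-based
        // bound is 1_000_000 * 1000 * 9 / 10 = 900_000_000 msat, so even a counterparty
        // advertising a 1_000_000_000 msat max-in-flight would see us announce 900_000_000 msat,
        // keeping roughly 10% of capacity as routing headroom.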
2390
2391         /// Allowed in any state (including after shutdown)
2392         pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
2393                 self.counterparty_htlc_minimum_msat
2394         }
2395
2396         /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
2397         pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
2398                 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
2399         }
2400
2401         fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
2402                 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
2403                         let holder_reserve = self.holder_selected_channel_reserve_satoshis;
2404                         cmp::min(
2405                                 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
2406                                 party_max_htlc_value_in_flight_msat
2407                         )
2408                 })
2409         }
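        // Worked example (hypothetical values): with a 1_000_000 sat channel and 10_000 sat
        // reserves on each side, the spendable ceiling is
        // (1_000_000 - 10_000 - 10_000) * 1000 = 980_000_000 msat, and the result is the lesser
        // of that and the given party's max_htlc_value_in_flight_msat.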
2410
2411         pub fn get_value_satoshis(&self) -> u64 {
2412                 self.channel_value_satoshis
2413         }
2414
2415         pub fn get_fee_proportional_millionths(&self) -> u32 {
2416                 self.config.options.forwarding_fee_proportional_millionths
2417         }
2418
2419         pub fn get_cltv_expiry_delta(&self) -> u16 {
2420                 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
2421         }
2422
2423         fn get_dust_exposure_limiting_feerate<F: Deref>(&self,
2424                 fee_estimator: &LowerBoundedFeeEstimator<F>,
2425         ) -> u32 where F::Target: FeeEstimator {
2426                 fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::OnChainSweep)
2427         }
2428
2429         pub fn get_max_dust_htlc_exposure_msat(&self, limiting_feerate_sat_per_kw: u32) -> u64 {
2430                 match self.config.options.max_dust_htlc_exposure {
2431                         MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
2432                                 (limiting_feerate_sat_per_kw as u64).saturating_mul(multiplier)
2433                         },
2434                         MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
2435                 }
2436         }
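        // Worked example (hypothetical config): with MaxDustHTLCExposure::FeeRateMultiplier(10_000)
        // and a limiting feerate of 2_500 sat/kW, the cap is 2_500 * 10_000 = 25_000_000 msat,
        // whereas MaxDustHTLCExposure::FixedLimitMsat(5_000_000) caps exposure at 5_000_000 msat
        // regardless of the prevailing feerate.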
2437
2438         /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
2439         pub fn prev_config(&self) -> Option<ChannelConfig> {
2440                 self.prev_config.map(|prev_config| prev_config.0)
2441         }
2442
2443         // Checks whether we should emit a `ChannelPending` event.
2444         pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
2445                 self.is_funding_broadcast() && !self.channel_pending_event_emitted
2446         }
2447
2448         // Returns whether we already emitted a `ChannelPending` event.
2449         pub(crate) fn channel_pending_event_emitted(&self) -> bool {
2450                 self.channel_pending_event_emitted
2451         }
2452
2453         // Remembers that we already emitted a `ChannelPending` event.
2454         pub(crate) fn set_channel_pending_event_emitted(&mut self) {
2455                 self.channel_pending_event_emitted = true;
2456         }
2457
2458         // Checks whether we should emit a `ChannelReady` event.
2459         pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
2460                 self.is_usable() && !self.channel_ready_event_emitted
2461         }
2462
2463         // Remembers that we already emitted a `ChannelReady` event.
2464         pub(crate) fn set_channel_ready_event_emitted(&mut self) {
2465                 self.channel_ready_event_emitted = true;
2466         }
2467
2468         /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
2469         /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
2470         /// no longer be considered when forwarding HTLCs.
2471         pub fn maybe_expire_prev_config(&mut self) {
2472                 if self.prev_config.is_none() {
2473                         return;
2474                 }
2475                 let prev_config = self.prev_config.as_mut().unwrap();
2476                 prev_config.1 += 1;
2477                 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
2478                         self.prev_config = None;
2479                 }
2480         }
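        // Illustrative timeline (assuming EXPIRE_PREV_CONFIG_TICKS == 5): update_config stores the
        // old options as (prev_config, 0); each subsequent call here bumps the counter, and on the
        // fifth call prev_config is dropped, after which only the current ChannelConfig is
        // considered when forwarding HTLCs.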
2481
2482         /// Returns the current [`ChannelConfig`] applied to the channel.
2483         pub fn config(&self) -> ChannelConfig {
2484                 self.config.options
2485         }
2486
2487         /// Updates the channel's config. Returns a bool indicating whether the update changed our
2488         /// relay policy, and thus whether a new ChannelUpdate message should be generated.
2489         pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
2490                 let did_channel_update =
2491                         self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
2492                         self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
2493                         self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
2494                 if did_channel_update {
2495                         self.prev_config = Some((self.config.options, 0));
2496                         // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
2497                         // policy change to propagate throughout the network.
2498                         self.update_time_counter += 1;
2499                 }
2500                 self.config.options = *config;
2501                 did_channel_update
2502         }
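        // Usage sketch (hypothetical caller, not from the original source):
        //
        //     let mut new_options = context.config();
        //     new_options.forwarding_fee_base_msat = 1_000;
        //     if context.update_config(&new_options) {
        //             // A relay-policy field changed; generate and broadcast a fresh ChannelUpdate.
        //     }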
2503
2504         /// Returns true if funding_signed was sent/received and the
2505         /// funding transaction has been broadcast if necessary.
2506         pub fn is_funding_broadcast(&self) -> bool {
2507                 !self.channel_state.is_pre_funded_state() &&
2508                         !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
2509         }
2510
2511         /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
2512         /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
2513         /// the transaction. Thus, b will generally be sending a signature over such a transaction to
2514         /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
2515         /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
2516         /// an HTLC to a).
2517         /// @local is used only to convert relevant internal structures which refer to remote vs local
2518         /// to decide value of outputs and direction of HTLCs.
2519         /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
2520         /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
2521         /// have not yet committed it. Such HTLCs will only be included in transactions which are being
2522         /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
2523         /// which peer generated this transaction and "to whom" this transaction flows.
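        // To summarize the flags described above:
        //  - `local` selects whose commitment transaction we build (ours if true, the
        //    counterparty's if false), and thus the direction of to_local/to_remote and HTLCs;
        //  - `generated_by_local` selects which not-yet-committed HTLCs to include: those
        //    proposed by us when true, those proposed by the counterparty when false.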
2524         #[inline]
2525         fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
2526                 where L::Target: Logger
2527         {
2528                 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
2529                 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
2530                 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
2531
2532                 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
2533                 let mut remote_htlc_total_msat = 0;
2534                 let mut local_htlc_total_msat = 0;
2535                 let mut value_to_self_msat_offset = 0;
2536
2537                 let mut feerate_per_kw = self.feerate_per_kw;
2538                 if let Some((feerate, update_state)) = self.pending_update_fee {
2539                         if match update_state {
2540                                 // Note that these match the inclusion criteria when scanning
2541                                 // pending_inbound_htlcs below.
2542                                 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
2543                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
2544                                 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
2545                         } {
2546                                 feerate_per_kw = feerate;
2547                         }
2548                 }
2549
2550                 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
2551                         commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
2552                         get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
2553                         &self.channel_id,
2554                         if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
2555
2556                 macro_rules! get_htlc_in_commitment {
2557                         ($htlc: expr, $offered: expr) => {
2558                                 HTLCOutputInCommitment {
2559                                         offered: $offered,
2560                                         amount_msat: $htlc.amount_msat,
2561                                         cltv_expiry: $htlc.cltv_expiry,
2562                                         payment_hash: $htlc.payment_hash,
2563                                         transaction_output_index: None
2564                                 }
2565                         }
2566                 }
2567
2568                 macro_rules! add_htlc_output {
2569                         ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
2570                                 if $outbound == local { // "offered HTLC output"
2571                                         let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
2572                                         let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2573                                                 0
2574                                         } else {
2575                                                 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
2576                                         };
2577                                         if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2578                                                 log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2579                                                 included_non_dust_htlcs.push((htlc_in_tx, $source));
2580                                         } else {
2581                                                 log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2582                                                 included_dust_htlcs.push((htlc_in_tx, $source));
2583                                         }
2584                                 } else {
2585                                         let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
2586                                         let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2587                                                 0
2588                                         } else {
2589                                                 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
2590                                         };
2591                                         if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2592                                                 log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2593                                                 included_non_dust_htlcs.push((htlc_in_tx, $source));
2594                                         } else {
2595                                                 log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2596                                                 included_dust_htlcs.push((htlc_in_tx, $source));
2597                                         }
2598                                 }
2599                         }
2600                 }
2601
2602                 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2603
2604                 for ref htlc in self.pending_inbound_htlcs.iter() {
2605                         let (include, state_name) = match htlc.state {
2606                                 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
2607                                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
2608                                 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
2609                                 InboundHTLCState::Committed => (true, "Committed"),
2610                                 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
2611                         };
2612
2613                         if include {
2614                                 add_htlc_output!(htlc, false, None, state_name);
2615                                 remote_htlc_total_msat += htlc.amount_msat;
2616                         } else {
2617                                 log_trace!(logger, "   ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
2618                                 match &htlc.state {
2619                                         &InboundHTLCState::LocalRemoved(ref reason) => {
2620                                                 if generated_by_local {
2621                                                         if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
2622                                                                 inbound_htlc_preimages.push(preimage);
2623                                                                 value_to_self_msat_offset += htlc.amount_msat as i64;
2624                                                         }
2625                                                 }
2626                                         },
2627                                         _ => {},
2628                                 }
2629                         }
2630                 }
2631
2632
2633                 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2634
2635                 for ref htlc in self.pending_outbound_htlcs.iter() {
2636                         let (include, state_name) = match htlc.state {
2637                                 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
2638                                 OutboundHTLCState::Committed => (true, "Committed"),
2639                                 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
2640                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
2641                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
2642                         };
2643
2644                         let preimage_opt = match htlc.state {
2645                                 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
2646                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
2647                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
2648                                 _ => None,
2649                         };
2650
2651                         if let Some(preimage) = preimage_opt {
2652                                 outbound_htlc_preimages.push(preimage);
2653                         }
2654
2655                         if include {
2656                                 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
2657                                 local_htlc_total_msat += htlc.amount_msat;
2658                         } else {
2659                                 log_trace!(logger, "   ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
2660                                 match htlc.state {
2661                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
2662                                                 value_to_self_msat_offset -= htlc.amount_msat as i64;
2663                                         },
2664                                         OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
2665                                                 if !generated_by_local {
2666                                                         value_to_self_msat_offset -= htlc.amount_msat as i64;
2667                                                 }
2668                                         },
2669                                         _ => {},
2670                                 }
2671                         }
2672                 }
2673
2674                 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
2675                 assert!(value_to_self_msat >= 0);
2676                 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (i.e.
2677                 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
2678                 // "violate" their reserve value by counting those against it. Thus, we have to convert
2679                 // everything to i64 before subtracting as otherwise we can overflow.
2680                 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
2681                 assert!(value_to_remote_msat >= 0);
2682
2683                 #[cfg(debug_assertions)]
2684                 {
2685                         // Make sure that the to_self/to_remote is always either past the appropriate
2686                         // channel_reserve *or* it is making progress towards it.
2687                         let mut broadcaster_max_commitment_tx_output = if generated_by_local {
2688                                 self.holder_max_commitment_tx_output.lock().unwrap()
2689                         } else {
2690                                 self.counterparty_max_commitment_tx_output.lock().unwrap()
2691                         };
2692                         debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
2693                         broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
2694                         debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
2695                         broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
2696                 }
2697
2698                 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
2699                 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
2700                 let (value_to_self, value_to_remote) = if self.is_outbound() {
2701                         (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
2702                 } else {
2703                         (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
2704                 };
2705
2706                 let mut value_to_a = if local { value_to_self } else { value_to_remote };
2707                 let mut value_to_b = if local { value_to_remote } else { value_to_self };
2708                 let (funding_pubkey_a, funding_pubkey_b) = if local {
2709                         (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
2710                 } else {
2711                         (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
2712                 };
2713
2714                 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
2715                         log_trace!(logger, "   ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
2716                 } else {
2717                         value_to_a = 0;
2718                 }
2719
2720                 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
2721                         log_trace!(logger, "   ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
2722                 } else {
2723                         value_to_b = 0;
2724                 }
2725
2726                 let num_nondust_htlcs = included_non_dust_htlcs.len();
2727
2728                 let channel_parameters =
2729                         if local { self.channel_transaction_parameters.as_holder_broadcastable() }
2730                         else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
2731                 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
2732                                                                              value_to_a as u64,
2733                                                                              value_to_b as u64,
2734                                                                              funding_pubkey_a,
2735                                                                              funding_pubkey_b,
2736                                                                              keys.clone(),
2737                                                                              feerate_per_kw,
2738                                                                              &mut included_non_dust_htlcs,
2739                                                                              &channel_parameters
2740                 );
2741                 let mut htlcs_included = included_non_dust_htlcs;
2742                 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
2743                 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
2744                 htlcs_included.append(&mut included_dust_htlcs);
2745
2746                 CommitmentStats {
2747                         tx,
2748                         feerate_per_kw,
2749                         total_fee_sat,
2750                         num_nondust_htlcs,
2751                         htlcs_included,
2752                         local_balance_msat: value_to_self_msat as u64,
2753                         remote_balance_msat: value_to_remote_msat as u64,
2754                         inbound_htlc_preimages,
2755                         outbound_htlc_preimages,
2756                 }
2757         }
2758
2759         #[inline]
2760         /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
2761         /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
2762         /// our counterparty!)
2763         /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
2764         /// TODO: Some magic Rust to compile-time check this?
2765         fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
2766                 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
2767                 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
2768                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2769                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2770
2771                 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
2772         }
2773
2774         #[inline]
2775         /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
2776         /// will sign and send to our counterparty.
2777         /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
2778         fn build_remote_transaction_keys(&self) -> TxCreationKeys {
2779                 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
2780                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2781                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2782
2783                 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
2784         }
2785
2786         /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
2787         /// pays to get_funding_redeemscript().to_v0_p2wsh()).
2788         /// Panics if called before accept_channel/InboundV1Channel::new
2789         pub fn get_funding_redeemscript(&self) -> ScriptBuf {
2790                 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
2791         }
2792
2793         fn counterparty_funding_pubkey(&self) -> &PublicKey {
2794                 &self.get_counterparty_pubkeys().funding_pubkey
2795         }
2796
2797         pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
2798                 self.feerate_per_kw
2799         }
2800
2801         pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
2802                 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
2803                 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
2804                 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
2805                 // more dust balance if the feerate increases when we have several HTLCs pending
2806                 // which are near the dust limit.
2807                 let mut feerate_per_kw = self.feerate_per_kw;
2808                 // If there's a pending update fee, use it to ensure we aren't under-estimating
2809                 // potential feerate updates coming soon.
2810                 if let Some((feerate, _)) = self.pending_update_fee {
2811                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2812                 }
2813                 if let Some(feerate) = outbound_feerate_update {
2814                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2815                 }
2816                 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
2817                 cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
2818         }
2819
2820         /// Get forwarding information for the counterparty.
2821         pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
2822                 self.counterparty_forwarding_info.clone()
2823         }
2824
2825         /// Returns an HTLCStats describing the currently-pending HTLCs.
2826         fn get_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>, dust_exposure_limiting_feerate: u32) -> HTLCStats {
2827                 let context = self;
2828                 let uses_0_htlc_fee_anchors = self.get_channel_type().supports_anchors_zero_fee_htlc_tx();
2829
2830                 let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update);
2831                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if uses_0_htlc_fee_anchors {
2832                         (0, 0)
2833                 } else {
2834                         (dust_buffer_feerate as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2835                                 dust_buffer_feerate as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2836                 };
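                // As a rough illustration, assuming the non-anchor HTLC-timeout weight of 663 WU:
                // with a buffered feerate of 3_530 sat/kWU the timeout dust addition is
                // 3_530 * 663 / 1000 = 2_340 sat on top of the configured dust limit.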
2837
2838                 let mut on_holder_tx_dust_exposure_msat = 0;
2839                 let mut on_counterparty_tx_dust_exposure_msat = 0;
2840
2841                 let mut on_counterparty_tx_offered_nondust_htlcs = 0;
2842                 let mut on_counterparty_tx_accepted_nondust_htlcs = 0;
2843
2844                 let mut pending_inbound_htlcs_value_msat = 0;
2845
2846                 {
2847                         let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2848                         let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2849                         for ref htlc in context.pending_inbound_htlcs.iter() {
2850                                 pending_inbound_htlcs_value_msat += htlc.amount_msat;
2851                                 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
2852                                         on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2853                                 } else {
2854                                         on_counterparty_tx_offered_nondust_htlcs += 1;
2855                                 }
2856                                 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
2857                                         on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2858                                 }
2859                         }
2860                 }
2861
2862                 let mut pending_outbound_htlcs_value_msat = 0;
2863                 let mut outbound_holding_cell_msat = 0;
2864                 let mut on_holder_tx_outbound_holding_cell_htlcs_count = 0;
2865                 let mut pending_outbound_htlcs = self.pending_outbound_htlcs.len();
2866                 {
2867                         let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2868                         let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2869                         for ref htlc in context.pending_outbound_htlcs.iter() {
2870                                 pending_outbound_htlcs_value_msat += htlc.amount_msat;
2871                                 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
2872                                         on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2873                                 } else {
2874                                         on_counterparty_tx_accepted_nondust_htlcs += 1;
2875                                 }
2876                                 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
2877                                         on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2878                                 }
2879                         }
2880
2881                         for update in context.holding_cell_htlc_updates.iter() {
2882                                 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
2883                                         pending_outbound_htlcs += 1;
2884                                         pending_outbound_htlcs_value_msat += amount_msat;
2885                                         outbound_holding_cell_msat += amount_msat;
2886                                         if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
2887                                                 on_counterparty_tx_dust_exposure_msat += amount_msat;
2888                                         } else {
2889                                                 on_counterparty_tx_accepted_nondust_htlcs += 1;
2890                                         }
2891                                         if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
2892                                                 on_holder_tx_dust_exposure_msat += amount_msat;
2893                                         } else {
2894                                                 on_holder_tx_outbound_holding_cell_htlcs_count += 1;
2895                                         }
2896                                 }
2897                         }
2898                 }
2899
2900                 // Include any mining "excess" fees in the dust calculation
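                // Any feerate above the limiting feerate is value we may effectively burn to
                // miners for each non-dust HTLC, so it is counted as counterparty-tx dust
                // exposure below.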
2901                 let excess_feerate_opt = outbound_feerate_update
2902                         .or(self.pending_update_fee.map(|(fee, _)| fee))
2903                         .unwrap_or(self.feerate_per_kw)
2904                         .checked_sub(dust_exposure_limiting_feerate);
2905                 if let Some(excess_feerate) = excess_feerate_opt {
2906                         let on_counterparty_tx_nondust_htlcs =
2907                                 on_counterparty_tx_accepted_nondust_htlcs + on_counterparty_tx_offered_nondust_htlcs;
2908                         on_counterparty_tx_dust_exposure_msat +=
2909                                 commit_tx_fee_msat(excess_feerate, on_counterparty_tx_nondust_htlcs, &self.channel_type);
2910                         if !self.channel_type.supports_anchors_zero_fee_htlc_tx() {
2911                                 on_counterparty_tx_dust_exposure_msat +=
2912                                         on_counterparty_tx_accepted_nondust_htlcs as u64 * htlc_success_tx_weight(&self.channel_type)
2913                                         * excess_feerate as u64 / 1000;
2914                                 on_counterparty_tx_dust_exposure_msat +=
2915                                         on_counterparty_tx_offered_nondust_htlcs as u64 * htlc_timeout_tx_weight(&self.channel_type)
2916                                         * excess_feerate as u64 / 1000;
2917                         }
2918                 }
2919
2920                 HTLCStats {
2921                         pending_inbound_htlcs: self.pending_inbound_htlcs.len(),
2922                         pending_outbound_htlcs,
2923                         pending_inbound_htlcs_value_msat,
2924                         pending_outbound_htlcs_value_msat,
2925                         on_counterparty_tx_dust_exposure_msat,
2926                         on_holder_tx_dust_exposure_msat,
2927                         outbound_holding_cell_msat,
2928                         on_holder_tx_outbound_holding_cell_htlcs_count,
2929                 }
2930         }
2931
2932         /// Returns information on all pending inbound HTLCs.
2933         pub fn get_pending_inbound_htlc_details(&self) -> Vec<InboundHTLCDetails> {
2934                 let mut holding_cell_states = new_hash_map();
2935                 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2936                         match holding_cell_update {
2937                                 HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2938                                         holding_cell_states.insert(
2939                                                 htlc_id,
2940                                                 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill,
2941                                         );
2942                                 },
2943                                 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2944                                         holding_cell_states.insert(
2945                                                 htlc_id,
2946                                                 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2947                                         );
2948                                 },
2949                                 HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => {
2950                                         holding_cell_states.insert(
2951                                                 htlc_id,
2952                                                 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2953                                         );
2954                                 },
2955                                 // Outbound HTLC.
2956                                 HTLCUpdateAwaitingACK::AddHTLC { .. } => {},
2957                         }
2958                 }
2959                 let mut inbound_details = Vec::new();
2960                 let htlc_success_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2961                         0
2962                 } else {
2963                         let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2964                         dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
2965                 };
2966                 let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
2967                 for htlc in self.pending_inbound_htlcs.iter() {
2968                         if let Some(state_details) = (&htlc.state).into() {
2969                                 inbound_details.push(InboundHTLCDetails{
2970                                         htlc_id: htlc.htlc_id,
2971                                         amount_msat: htlc.amount_msat,
2972                                         cltv_expiry: htlc.cltv_expiry,
2973                                         payment_hash: htlc.payment_hash,
2974                                         state: Some(holding_cell_states.remove(&htlc.htlc_id).unwrap_or(state_details)),
2975                                         is_dust: htlc.amount_msat / 1000 < holder_dust_limit_success_sat,
2976                                 });
2977                         }
2978                 }
2979                 inbound_details
2980         }
2981
2982         /// Returns information on all pending outbound HTLCs.
2983         pub fn get_pending_outbound_htlc_details(&self) -> Vec<OutboundHTLCDetails> {
2984                 let mut outbound_details = Vec::new();
2985                 let htlc_timeout_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2986                         0
2987                 } else {
2988                         let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2989                         dust_buffer_feerate * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
2990                 };
2991                 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
2992                 for htlc in self.pending_outbound_htlcs.iter() {
2993                         outbound_details.push(OutboundHTLCDetails{
2994                                 htlc_id: Some(htlc.htlc_id),
2995                                 amount_msat: htlc.amount_msat,
2996                                 cltv_expiry: htlc.cltv_expiry,
2997                                 payment_hash: htlc.payment_hash,
2998                                 skimmed_fee_msat: htlc.skimmed_fee_msat,
2999                                 state: Some((&htlc.state).into()),
3000                                 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat,
3001                         });
3002                 }
3003                 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
3004                         if let HTLCUpdateAwaitingACK::AddHTLC {
3005                                 amount_msat,
3006                                 cltv_expiry,
3007                                 payment_hash,
3008                                 skimmed_fee_msat,
3009                                 ..
3010                         } = *holding_cell_update {
3011                                 outbound_details.push(OutboundHTLCDetails{
3012                                         htlc_id: None,
3013                                         amount_msat: amount_msat,
3014                                         cltv_expiry: cltv_expiry,
3015                                         payment_hash: payment_hash,
3016                                         skimmed_fee_msat: skimmed_fee_msat,
3017                                         state: Some(OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
3018                                         is_dust: amount_msat / 1000 < holder_dust_limit_timeout_sat,
3019                                 });
3020                         }
3021                 }
3022                 outbound_details
3023         }
3024
3025         /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
3026         /// Doesn't bother handling the
3027         /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
3028         /// corner case properly.
3029         pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
3030         -> AvailableBalances
3031         where F::Target: FeeEstimator
3032         {
3033                 let context = &self;
3034                 // Note that we have to handle overflow due to the case mentioned in the docs in general
3035                 // here.
3036
3037                 let dust_exposure_limiting_feerate = self.get_dust_exposure_limiting_feerate(&fee_estimator);
3038                 let htlc_stats = context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
3039
3040                 let mut balance_msat = context.value_to_self_msat;
3041                 for ref htlc in context.pending_inbound_htlcs.iter() {
3042                         if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
3043                                 balance_msat += htlc.amount_msat;
3044                         }
3045                 }
3046                 balance_msat -= htlc_stats.pending_outbound_htlcs_value_msat;
3047
3048                 let outbound_capacity_msat = context.value_to_self_msat
3049                                 .saturating_sub(htlc_stats.pending_outbound_htlcs_value_msat)
3050                                 .saturating_sub(
3051                                         context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
3052
3053                 let mut available_capacity_msat = outbound_capacity_msat;
3054
3055                 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3056                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3057                 } else {
3058                         0
3059                 };
3060                 if context.is_outbound() {
3061                         // We should mind channel commit tx fee when computing how much of the available capacity
3062                         // can be used in the next htlc. Mirrors the logic in send_htlc.
3063                         //
3064                         // The fee depends on whether the amount we will be sending is above dust or not,
3065                         // and the answer will in turn change the amount itself, making it a circular
3066                         // dependency.
3067                         // This complicates the computation around dust-values, up to the one-htlc-value.
3068                         let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
3069                         if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3070                                 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
3071                         }
3072
3073                         let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
3074                         let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
3075                         let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
3076                         let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
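                        // On non-anchor channels, scale the reserved fee by the fee-spike buffer
                        // multiple so that a sudden feerate increase cannot immediately leave us
                        // unable to pay the fee on our own commitment transaction.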
3077                         if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3078                                 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3079                                 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3080                         }
3081
3082                         // We will first subtract the fee as if we were above-dust. Then, if the resulting
3083                         // value ends up being below dust, we have this fee available again. In that case,
3084                         // match the value to right-below-dust.
3085                         let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
3086                                 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
3087                         if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
3088                                 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
3089                                 debug_assert!(one_htlc_difference_msat != 0);
3090                                 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
3091                                 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
3092                                 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
3093                         } else {
3094                                 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
3095                         }
3096                 } else {
3097                         // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
3098                         // sending a new HTLC won't reduce their balance below our reserve threshold.
3099                         let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
3100                         if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3101                                 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
3102                         }
3103
3104                         let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
3105                         let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
3106
3107                         let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
3108                         let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
3109                                 .saturating_sub(htlc_stats.pending_inbound_htlcs_value_msat);
3110
3111                         if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
3112                                 // If another HTLC's fee would reduce the remote's balance below the reserve limit
3113                                 // we've selected for them, we can only send dust HTLCs.
3114                                 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
3115                         }
3116                 }
3117
3118                 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
3119
3120                 // If we get close to our maximum dust exposure, we end up in a situation where we can send
3121                 // between zero and the remaining dust exposure limit, OR above the dust limit.
3122                 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
3123                 // send above the dust limit (as the router can always overpay to meet the dust limit).
3124                 let mut remaining_msat_below_dust_exposure_limit = None;
3125                 let mut dust_exposure_dust_limit_msat = 0;
3126                 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
3127
3128                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3129                         (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
3130                 } else {
3131                         let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
3132                         (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3133                          context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3134                 };
3135
3136                 let excess_feerate_opt = self.feerate_per_kw.checked_sub(dust_exposure_limiting_feerate);
3137                 if let Some(excess_feerate) = excess_feerate_opt {
3138                         let htlc_dust_exposure_msat =
3139                                 per_outbound_htlc_counterparty_commit_tx_fee_msat(excess_feerate, &context.channel_type);
3140                         let nondust_htlc_counterparty_tx_dust_exposure =
3141                                 htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_dust_exposure_msat);
3142                         if nondust_htlc_counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
3143                                 // If adding an extra HTLC would put us over the dust limit in total fees, we cannot
3144                                 // send any non-dust HTLCs.
3145                                 available_capacity_msat = cmp::min(available_capacity_msat, htlc_success_dust_limit * 1000);
3146                         }
3147                 }
3148
3149                 if htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_success_dust_limit * 1000) > max_dust_htlc_exposure_msat.saturating_add(1) {
3150                         // Note that we don't use the `counterparty_tx_dust_exposure` (with
3151                         // `htlc_dust_exposure_msat`) here as it only applies to non-dust HTLCs.
3152                         remaining_msat_below_dust_exposure_limit =
3153                                 Some(max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_counterparty_tx_dust_exposure_msat));
3154                         dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
3155                 }
3156
3157                 if htlc_stats.on_holder_tx_dust_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
3158                         remaining_msat_below_dust_exposure_limit = Some(cmp::min(
3159                                 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
3160                                 max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_holder_tx_dust_exposure_msat)));
3161                         dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
3162                 }
3163
3164                 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
3165                         if available_capacity_msat < dust_exposure_dust_limit_msat {
3166                                 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
3167                         } else {
3168                                 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
3169                         }
3170                 }
3171
3172                 available_capacity_msat = cmp::min(available_capacity_msat,
3173                         context.counterparty_max_htlc_value_in_flight_msat - htlc_stats.pending_outbound_htlcs_value_msat);
3174
3175                 if htlc_stats.pending_outbound_htlcs + 1 > context.counterparty_max_accepted_htlcs as usize {
3176                         available_capacity_msat = 0;
3177                 }
3178
3179                 AvailableBalances {
3180                         inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
3181                                         - context.value_to_self_msat as i64
3182                                         - htlc_stats.pending_inbound_htlcs_value_msat as i64
3183                                         - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
3184                                 0) as u64,
3185                         outbound_capacity_msat,
3186                         next_outbound_htlc_limit_msat: available_capacity_msat,
3187                         next_outbound_htlc_minimum_msat,
3188                         balance_msat,
3189                 }
3190         }
3191
3192         pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
3193                 let context = &self;
3194                 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
3195         }
3196
3197         /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
3198         /// number of pending HTLCs that are on track to be in our next commitment tx.
3199         ///
3200         /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3201         /// `fee_spike_buffer_htlc` is `Some`.
3202         ///
3203         /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3204         /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3205         ///
3206         /// Dust HTLCs are excluded.
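        /// For example, querying an above-dust `LocalOffered` candidate with
        /// `fee_spike_buffer_htlc` set counts two HTLCs beyond those already pending.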
3207         fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3208                 let context = &self;
3209                 assert!(context.is_outbound());
3210
3211                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3212                         (0, 0)
3213                 } else {
3214                         (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3215                                 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3216                 };
3217                 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
3218                 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
3219
3220                 let mut addl_htlcs = 0;
3221                 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3222                 match htlc.origin {
3223                         HTLCInitiator::LocalOffered => {
3224                                 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3225                                         addl_htlcs += 1;
3226                                 }
3227                         },
3228                         HTLCInitiator::RemoteOffered => {
3229                                 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
3230                                         addl_htlcs += 1;
3231                                 }
3232                         }
3233                 }
3234
3235                 let mut included_htlcs = 0;
3236                 for ref htlc in context.pending_inbound_htlcs.iter() {
3237                         if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
3238                                 continue
3239                         }
3240                         // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
3241                         // transaction including this HTLC if it times out before they RAA.
3242                         included_htlcs += 1;
3243                 }
3244
3245                 for ref htlc in context.pending_outbound_htlcs.iter() {
3246                         if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
3247                                 continue
3248                         }
3249                         match htlc.state {
3250                                 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
3251                                 OutboundHTLCState::Committed => included_htlcs += 1,
3252                                 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3253                                 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
3254                                 // transaction won't be generated until they send us their next RAA, which will mean
3255                                 // dropping any HTLCs in this state.
3256                                 _ => {},
3257                         }
3258                 }
3259
3260                 for htlc in context.holding_cell_htlc_updates.iter() {
3261                         match htlc {
3262                                 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
3263                                         if amount_msat / 1000 < real_dust_limit_timeout_sat {
3264                                                 continue
3265                                         }
3266                                         included_htlcs += 1
3267                                 },
3268                                 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
3269                                          // ack we're guaranteed to never include them in commitment txs anymore.
3270                         }
3271                 }
3272
3273                 let num_htlcs = included_htlcs + addl_htlcs;
3274                 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
3275                 #[cfg(any(test, fuzzing))]
3276                 {
3277                         let mut fee = res;
3278                         if fee_spike_buffer_htlc.is_some() {
3279                                 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3280                         }
3281                         let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
3282                                 + context.holding_cell_htlc_updates.len();
3283                         let commitment_tx_info = CommitmentTxInfoCached {
3284                                 fee,
3285                                 total_pending_htlcs,
3286                                 next_holder_htlc_id: match htlc.origin {
3287                                         HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3288                                         HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3289                                 },
3290                                 next_counterparty_htlc_id: match htlc.origin {
3291                                         HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3292                                         HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3293                                 },
3294                                 feerate: context.feerate_per_kw,
3295                         };
3296                         *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
3297                 }
3298                 res
3299         }
3300
3301         /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
3302         /// pending HTLCs that are on track to be in their next commitment tx
3303         ///
3304         /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3305         /// `fee_spike_buffer_htlc` is `Some`.
3306         ///
3307         /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3308         /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3309         ///
3310         /// Dust HTLCs are excluded.
3311         fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3312                 let context = &self;
3313                 assert!(!context.is_outbound());
3314
3315                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3316                         (0, 0)
3317                 } else {
3318                         (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3319                                 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3320                 };
3321                 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
3322                 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
3323
3324                 let mut addl_htlcs = 0;
3325                 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3326                 match htlc.origin {
3327                         HTLCInitiator::LocalOffered => {
3328                                 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
3329                                         addl_htlcs += 1;
3330                                 }
3331                         },
3332                         HTLCInitiator::RemoteOffered => {
3333                                 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3334                                         addl_htlcs += 1;
3335                                 }
3336                         }
3337                 }
3338
3339                 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
3340                 // non-dust inbound HTLCs are included (as all states imply it will be included) and only
3341                 // committed outbound HTLCs, see below.
3342                 let mut included_htlcs = 0;
3343                 for ref htlc in context.pending_inbound_htlcs.iter() {
3344                         if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
3345                                 continue
3346                         }
3347                         included_htlcs += 1;
3348                 }
3349
3350                 for ref htlc in context.pending_outbound_htlcs.iter() {
3351                         if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
3352                                 continue
3353                         }
3354                         // We only include outbound HTLCs which will (or may still) appear in their next
3355                         // commitment transaction, i.e. those whose removal has not yet been irrevocably signed.
3356                         match htlc.state {
3357                                 OutboundHTLCState::Committed => included_htlcs += 1,
3358                                 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3359                                 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
3360                                 _ => {},
3361                         }
3362                 }
3363
3364                 let num_htlcs = included_htlcs + addl_htlcs;
3365                 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
3366                 #[cfg(any(test, fuzzing))]
3367                 {
3368                         let mut fee = res;
3369                         if fee_spike_buffer_htlc.is_some() {
3370                                 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3371                         }
3372                         let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
3373                         let commitment_tx_info = CommitmentTxInfoCached {
3374                                 fee,
3375                                 total_pending_htlcs,
3376                                 next_holder_htlc_id: match htlc.origin {
3377                                         HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3378                                         HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3379                                 },
3380                                 next_counterparty_htlc_id: match htlc.origin {
3381                                         HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3382                                         HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3383                                 },
3384                                 feerate: context.feerate_per_kw,
3385                         };
3386                         *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
3387                 }
3388                 res
3389         }
3390
3391         fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
3392                 match self.channel_state {
3393                         ChannelState::FundingNegotiated => f(),
3394                         ChannelState::AwaitingChannelReady(flags) =>
3395                                 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
3396                                         flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
3397                                 {
3398                                         f()
3399                                 } else {
3400                                         None
3401                                 },
3402                         _ => None,
3403                 }
3404         }
3405
3406         /// Returns the transaction if there is a pending funding transaction that is yet to be
3407         /// broadcast.
3408         pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
3409                 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
3410         }
3411
3412         /// Returns the transaction ID if there is a pending funding transaction that is yet to be
3413         /// broadcast.
3414         pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
3415                 self.if_unbroadcasted_funding(||
3416                         self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
3417                 )
3418         }
3419
3420         /// Returns whether the channel is funded in a batch.
3421         pub fn is_batch_funding(&self) -> bool {
3422                 self.is_batch_funding.is_some()
3423         }
3424
3425         /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
3426         /// broadcast.
3427         pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
3428                 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
3429         }
3430
3431         /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
3432         /// shutdown of this channel - no more calls into this Channel may be made afterwards except
3433         /// those explicitly stated to be allowed after shutdown completes, eg some simple getters).
3434         /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
3435         /// immediately (others we will have to allow to time out).
3436         pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
3437                 // Note that we MUST only generate a monitor update that indicates force-closure - we're
3438                 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
3439         // being fully configured in some cases. Thus, it's likely any monitor events we generate will
3440                 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
3441                 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
3442
3443                 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
3444                 // return them to fail the payment.
3445                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
3446                 let counterparty_node_id = self.get_counterparty_node_id();
3447                 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
3448                         match htlc_update {
3449                                 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
3450                                         dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
3451                                 },
3452                                 _ => {}
3453                         }
3454                 }
3455                 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
3456                         // If we haven't yet exchanged funding signatures (ie channel_state < AwaitingChannelReady),
3457                         // returning a channel monitor update here would imply a channel monitor update before
3458                         // we even registered the channel monitor to begin with, which is invalid.
3459                         // Thus, if we aren't actually at a point where we could conceivably broadcast the
3460                         // funding transaction, don't return a funding txo (which prevents providing the
3461                         // monitor update to the user, even if we return one).
3462                         // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
3463                         if !self.channel_state.is_pre_funded_state() {
3464                                 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
3465                                 Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
3466                                         update_id: self.latest_monitor_update_id,
3467                                         counterparty_node_id: Some(self.counterparty_node_id),
3468                                         updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
3469                                         channel_id: Some(self.channel_id()),
3470                                 }))
3471                         } else { None }
3472                 } else { None };
3473                 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
3474                 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
3475
3476                 self.channel_state = ChannelState::ShutdownComplete;
3477                 self.update_time_counter += 1;
3478                 ShutdownResult {
3479                         closure_reason,
3480                         monitor_update,
3481                         dropped_outbound_htlcs,
3482                         unbroadcasted_batch_funding_txid,
3483                         channel_id: self.channel_id,
3484                         user_channel_id: self.user_id,
3485                         channel_capacity_satoshis: self.channel_value_satoshis,
3486                         counterparty_node_id: self.counterparty_node_id,
3487                         unbroadcasted_funding_tx,
3488                         channel_funding_txo: self.get_funding_txo(),
3489                 }
3490         }
3491
3492         /// Only allowed after [`Self::channel_transaction_parameters`] is set.
3493         fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
3494                 let counterparty_keys = self.build_remote_transaction_keys();
3495                 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
3496
3497                 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
3498                 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
3499                 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
3500                         &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
3501
3502                 match &self.holder_signer {
3503                         // TODO (arik): move match into calling method for Taproot
3504                         ChannelSignerType::Ecdsa(ecdsa) => {
3505                                 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
3506                                         .map(|(signature, _)| msgs::FundingSigned {
3507                                                 channel_id: self.channel_id(),
3508                                                 signature,
3509                                                 #[cfg(taproot)]
3510                                                 partial_signature_with_nonce: None,
3511                                         })
3512                                         .ok();
3513
3514                                 if funding_signed.is_none() {
3515                                         #[cfg(not(async_signing))] {
3516                                                 panic!("Failed to get signature for funding_signed");
3517                                         }
3518                                         #[cfg(async_signing)] {
3519                                                 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
3520                                                 self.signer_pending_funding = true;
3521                                         }
3522                                 } else if self.signer_pending_funding {
3523                                         log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
3524                                         self.signer_pending_funding = false;
3525                                 }
3526
3527                                 // We sign "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
3528                                 (counterparty_initial_commitment_tx, funding_signed)
3529                         },
3530                         // TODO (taproot|arik)
3531                         #[cfg(taproot)]
3532                         _ => todo!()
3533                 }
3534         }
3535
3536         /// If we receive an error message when attempting to open a channel, it may only be a rejection
3537         /// of the channel type we tried, not of our ability to open any channel at all. We can see if a
3538         /// downgrade of channel features would be possible so that we can still open the channel.
3539         pub(crate) fn maybe_downgrade_channel_features<F: Deref>(
3540                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>
3541         ) -> Result<(), ()>
3542         where
3543                 F::Target: FeeEstimator
3544         {
3545                 if !self.is_outbound() ||
3546                         !matches!(
3547                                 self.channel_state, ChannelState::NegotiatingFunding(flags)
3548                                 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
3549                         )
3550                 {
3551                         return Err(());
3552                 }
3553                 if self.channel_type == ChannelTypeFeatures::only_static_remote_key() {
3554                         // We've exhausted our options
3555                         return Err(());
3556                 }
3557                 // We support opening a few different types of channels. Try removing our additional
3558                 // features one by one until we've either arrived at our default or the counterparty has
3559                 // accepted one.
3560                 //
3561                 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
3562                 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
3563                 // checks whether the counterparty supports every feature, this would only happen if the
3564                 // counterparty is advertising the feature, but rejecting channels proposing the feature for
3565                 // whatever reason.
3566                 if self.channel_type.supports_anchors_zero_fee_htlc_tx() {
3567                         self.channel_type.clear_anchors_zero_fee_htlc_tx();
3568                         self.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
3569                         assert!(!self.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
3570                 } else if self.channel_type.supports_scid_privacy() {
3571                         self.channel_type.clear_scid_privacy();
3572                 } else {
3573                         self.channel_type = ChannelTypeFeatures::only_static_remote_key();
3574                 }
3575                 self.channel_transaction_parameters.channel_type_features = self.channel_type.clone();
3576                 Ok(())
3577         }
3578 }
3579
3580 // Internal utility functions for channels
3581
3582 /// Returns the value, in msat, to use for `holder_max_htlc_value_in_flight_msat`, computed
3583 /// as a percentage of `channel_value_satoshis` set through
3584 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
3585 ///
3586 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
3587 ///
3588 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
3589 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
3590         let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
3591                 1
3592         } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
3593                 100
3594         } else {
3595                 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
3596         };
3597         channel_value_satoshis * 10 * configured_percent
3598 }
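// A minimal illustrative check (a sketch, not part of the API surface): the `* 10` above is
// `* 1000` (sat -> msat) combined with `/ 100` (percent), so a 1_000_000 sat channel capped
// at 10% allows 100_000_000 msat in flight.
#[cfg(test)]
#[test]
fn get_holder_max_htlc_value_in_flight_msat_sketch() {
        let mut config = ChannelHandshakeConfig::default();
        config.max_inbound_htlc_value_in_flight_percent_of_channel = 10;
        assert_eq!(get_holder_max_htlc_value_in_flight_msat(1_000_000, &config), 100_000_000);
}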
3599
3600 /// Returns a minimum channel reserve value the remote needs to maintain,
3601 /// required by us according to the configured or default
3602 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
3603 ///
3604 /// Guaranteed to return a value no larger than channel_value_satoshis
3605 ///
3606 /// This is used both for outbound and inbound channels and has lower bound
3607 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
3608 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
3609         let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
3610         cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
3611 }
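// Illustrative sketch: at 10_000 proportional-millionths (1%), a 1_000_000 sat channel
// requires a 10_000 sat reserve; the result is floored at MIN_THEIR_CHAN_RESERVE_SATOSHIS
// and capped at the channel value itself.
#[cfg(test)]
#[test]
fn get_holder_selected_channel_reserve_satoshis_sketch() {
        let mut config = UserConfig::default();
        config.channel_handshake_config.their_channel_reserve_proportional_millionths = 10_000;
        assert_eq!(get_holder_selected_channel_reserve_satoshis(1_000_000, &config), 10_000);
}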
3612
3613 /// This is for legacy reasons, present for forward-compatibility.
3614 /// LDK versions older than 0.0.104 don't know how to read/handle values other than the default
3615 /// from storage. Hence, we use this function to not persist default values of
3616 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
3617 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
3618         let (q, _) = channel_value_satoshis.overflowing_div(100);
3619         cmp::min(channel_value_satoshis, cmp::max(q, 1000))
3620 }
3621
3622 /// Returns a minimum channel reserve value each party needs to maintain, fixed in the spec to a
3623 /// default of 1% of the total channel value.
3624 ///
3625 /// Guaranteed to return a value no larger than channel_value_satoshis
3626 ///
3627 /// This is used both for outbound and inbound channels and has lower bound
3628 /// of `dust_limit_satoshis`.
3629 #[cfg(any(dual_funding, splicing))]
3630 fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
3631         // Fixed at 1% of channel value by spec.
3632         let (q, _) = channel_value_satoshis.overflowing_div(100);
3633         cmp::min(channel_value_satoshis, cmp::max(q, dust_limit_satoshis))
3634 }
3635
3636 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
3637 // Note that num_htlcs should not include dust HTLCs.
3638 #[inline]
3639 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3640         feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
3641 }
3642
3643 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
3644 // Note that num_htlcs should not include dust HTLCs.
3645 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3646         // Note that we divide by 1000 before re-multiplying, rounding the fee down to a whole
3647         // satoshi, since the lowest denomination of bitcoin on-chain is the satoshi.
3648         (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
3649 }
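
// A quick sketch (not part of upstream) relating the two helpers above: the msat fee is always
// the sat fee scaled by 1000, i.e. a whole number of satoshis, because the divide-then-multiply
// rounding truncates sub-satoshi precision.
#[cfg(test)]
#[test]
fn commit_tx_fee_msat_is_whole_satoshis() {
        let features = ChannelTypeFeatures::only_static_remote_key();
        for num_htlcs in 0..10 {
                assert_eq!(commit_tx_fee_msat(253, num_htlcs, &features),
                        commit_tx_fee_sat(253, num_htlcs, &features) * 1000);
        }
}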
3650
3651 pub(crate) fn per_outbound_htlc_counterparty_commit_tx_fee_msat(feerate_per_kw: u32, channel_type_features: &ChannelTypeFeatures) -> u64 {
3652         // Note that we divide by 1000 before re-multiplying, rounding the fee down to a whole
3653         // satoshi, since the lowest denomination of bitcoin on-chain is the satoshi.
3654         let commitment_tx_fee = COMMITMENT_TX_WEIGHT_PER_HTLC * feerate_per_kw as u64 / 1000 * 1000;
3655         if channel_type_features.supports_anchors_zero_fee_htlc_tx() {
3656                 commitment_tx_fee + htlc_success_tx_weight(channel_type_features) * feerate_per_kw as u64 / 1000
3657         } else {
3658                 commitment_tx_fee
3659         }
3660 }
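
// A sketch (not part of upstream) of the anchors case above: with `anchors_zero_fee_htlc_tx`
// the counterparty also bears the fee for the pre-signed HTLC-success transaction, so the
// per-HTLC cost is strictly higher than the non-anchors case at the same feerate.
#[cfg(test)]
#[test]
fn per_outbound_htlc_counterparty_fee_anchors_vs_non_anchors() {
        let non_anchors = ChannelTypeFeatures::only_static_remote_key();
        let anchors = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
        assert!(per_outbound_htlc_counterparty_commit_tx_fee_msat(253, &anchors)
                > per_outbound_htlc_counterparty_commit_tx_fee_msat(253, &non_anchors));
}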
3661
3662 /// Context for dual-funded channels.
3663 #[cfg(any(dual_funding, splicing))]
3664 pub(super) struct DualFundingChannelContext {
3665         /// The amount in satoshis we will be contributing to the channel.
3666         pub our_funding_satoshis: u64,
3667         /// The amount in satoshis our counterparty will be contributing to the channel.
3668         pub their_funding_satoshis: u64,
3669         /// The funding transaction locktime suggested by the initiator. If set by us, it is always set
3670         /// to the current block height to align incentives against fee-sniping.
3671         pub funding_tx_locktime: u32,
3672         /// The feerate set by the initiator to be used for the funding transaction.
3673         pub funding_feerate_sat_per_1000_weight: u32,
3674 }
3675
3676 // Holder designates channel data owned for the benefit of the user client.
3677 // Counterparty designates channel data owned by the other channel participant entity.
3678 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
3679         pub context: ChannelContext<SP>,
3680         #[cfg(any(dual_funding, splicing))]
3681         pub dual_funding_channel_context: Option<DualFundingChannelContext>,
3682 }
3683
3684 #[cfg(any(test, fuzzing))]
3685 struct CommitmentTxInfoCached {
3686         fee: u64,
3687         total_pending_htlcs: usize,
3688         next_holder_htlc_id: u64,
3689         next_counterparty_htlc_id: u64,
3690         feerate: u32,
3691 }
3692
3693 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
3694 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
3695 trait FailHTLCContents {
3696         type Message: FailHTLCMessageName;
3697         fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
3698         fn to_inbound_htlc_state(self) -> InboundHTLCState;
3699         fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
3700 }
3701 impl FailHTLCContents for msgs::OnionErrorPacket {
3702         type Message = msgs::UpdateFailHTLC;
3703         fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3704                 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
3705         }
3706         fn to_inbound_htlc_state(self) -> InboundHTLCState {
3707                 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
3708         }
3709         fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3710                 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
3711         }
3712 }
3713 impl FailHTLCContents for ([u8; 32], u16) {
3714         type Message = msgs::UpdateFailMalformedHTLC;
3715         fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3716                 msgs::UpdateFailMalformedHTLC {
3717                         htlc_id,
3718                         channel_id,
3719                         sha256_of_onion: self.0,
3720                         failure_code: self.1
3721                 }
3722         }
3723         fn to_inbound_htlc_state(self) -> InboundHTLCState {
3724                 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
3725         }
3726         fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3727                 HTLCUpdateAwaitingACK::FailMalformedHTLC {
3728                         htlc_id,
3729                         sha256_of_onion: self.0,
3730                         failure_code: self.1
3731                 }
3732         }
3733 }
3734
3735 trait FailHTLCMessageName {
3736         fn name() -> &'static str;
3737 }
3738 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
3739         fn name() -> &'static str {
3740                 "update_fail_htlc"
3741         }
3742 }
3743 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
3744         fn name() -> &'static str {
3745                 "update_fail_malformed_htlc"
3746         }
3747 }
3748
3749 impl<SP: Deref> Channel<SP> where
3750         SP::Target: SignerProvider,
3751         <SP::Target as SignerProvider>::EcdsaSigner: EcdsaChannelSigner
3752 {
3753         fn check_remote_fee<F: Deref, L: Deref>(
3754                 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
3755                 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
3756         ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
3757         {
3758                 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
3759                         ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
3760                 } else {
3761                         ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
3762                 };
3763                 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
3764                 if feerate_per_kw < lower_limit {
3765                         if let Some(cur_feerate) = cur_feerate_per_kw {
3766                                 if feerate_per_kw > cur_feerate {
3767                                         log_warn!(logger,
3768                                                 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
3769                                                 cur_feerate, feerate_per_kw);
3770                                         return Ok(());
3771                                 }
3772                         }
3773                         return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
3774                 }
3775                 Ok(())
3776         }
3777
3778         #[inline]
3779         fn get_closing_scriptpubkey(&self) -> ScriptBuf {
3780                 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
3781                 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
3782                 // outside of those situations will panic, as the scriptpubkey is unwrapped here.
3783                 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
3784         }
3785
3786         #[inline]
3787         fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
3788                 let mut ret =
3789                 (4 +                                                   // version
3790                  1 +                                                   // input count
3791                  36 +                                                  // prevout
3792                  1 +                                                   // script length (0)
3793                  4 +                                                   // sequence
3794                  1 +                                                   // output count
3795                  4                                                     // lock time
3796                  )*4 +                                                 // * 4 for non-witness parts
3797                 2 +                                                    // witness marker and flag
3798                 1 +                                                    // witness element count
3799                 4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
3800                 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
3801                 2*(1 + 71);                                            // two signatures + sighash type flags
3802                 if let Some(spk) = a_scriptpubkey {
3803                         ret += ((8+1) +                                    // output values and script length
3804                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
3805                 }
3806                 if let Some(spk) = b_scriptpubkey {
3807                         ret += ((8+1) +                                    // output values and script length
3808                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
3809                 }
3810                 ret
3811         }
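
        // A worked example (illustrative sketch, not part of upstream) of the accounting above:
        // with the standard 71-byte 2-of-2 funding redeemscript and two P2WPKH (22-byte) outputs
        // the total comes to 674 weight units.
        #[cfg(test)]
        fn example_closing_tx_weight_two_p2wpkh_outputs() -> u64 {
                let non_witness_wu: u64 = (4 + 1 + 36 + 1 + 4 + 1 + 4) * 4; // fixed fields = 204 WU
                let witness_wu: u64 = 2 + 1 + 4 + 71 + 2 * (1 + 71);        // marker/flag, lengths, script, sigs = 222 WU
                let per_output_wu: u64 = (8 + 1 + 22) * 4;                  // value, script len, P2WPKH spk = 124 WU
                non_witness_wu + witness_wu + 2 * per_output_wu             // 674 WU
        }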
3812
3813         #[inline]
3814         fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
3815                 assert!(self.context.pending_inbound_htlcs.is_empty());
3816                 assert!(self.context.pending_outbound_htlcs.is_empty());
3817                 assert!(self.context.pending_update_fee.is_none());
3818
3819                 let mut total_fee_satoshis = proposed_total_fee_satoshis;
3820                 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
3821                 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
3822
3823                 if value_to_holder < 0 {
3824                         assert!(self.context.is_outbound());
3825                         total_fee_satoshis += (-value_to_holder) as u64;
3826                 } else if value_to_counterparty < 0 {
3827                         assert!(!self.context.is_outbound());
3828                         total_fee_satoshis += (-value_to_counterparty) as u64;
3829                 }
3830
3831                 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
3832                         value_to_counterparty = 0;
3833                 }
3834
3835                 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
3836                         value_to_holder = 0;
3837                 }
3838
3839                 assert!(self.context.shutdown_scriptpubkey.is_some());
3840                 let holder_shutdown_script = self.get_closing_scriptpubkey();
3841                 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
3842                 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
3843
3844                 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
3845                 (closing_transaction, total_fee_satoshis)
3846         }
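
        // A minimal sketch (hypothetical helper, not part of upstream) of the happy-path split
        // above: the opener's balance pays the full closing fee, and either output is trimmed to
        // zero when it falls at or below our dust limit.
        #[cfg(test)]
        fn example_closing_balance_split_for_opener(
                channel_value_sat: u64, value_to_self_msat: u64, fee_sat: u64, dust_limit_sat: u64,
        ) -> (u64, u64) {
                let to_holder = (value_to_self_msat / 1000).saturating_sub(fee_sat);
                let to_counterparty = channel_value_sat - value_to_self_msat / 1000;
                let trim = |v: u64| if v <= dust_limit_sat { 0 } else { v };
                (trim(to_holder), trim(to_counterparty))
        }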
3847
3848         fn funding_outpoint(&self) -> OutPoint {
3849                 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
3850         }
3851
3852         /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
3853         /// entirely.
3854         ///
3855         /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
3856         /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
3857         ///
3858         /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
3859         /// disconnected).
3860         pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
3861                 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
3862         where L::Target: Logger {
3863                 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
3864                 // (see equivalent if condition there).
3865                 assert!(!self.context.channel_state.can_generate_new_commitment());
3866                 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
3867                 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
3868                 self.context.latest_monitor_update_id = mon_update_id;
3869                 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
3870                         assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
3871                 }
3872         }
3873
3874         fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
3875                 // Either ChannelReady got set (which means it won't be unset) or there is no way any
3876                 // caller thought we could have something claimed (because we wouldn't have accepted an
3877                 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
3878                 // either.
3879                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3880                         panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
3881                 }
3882
3883                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3884                 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
3885                 // these, but for now we just have to treat them as normal.
3886
3887                 let mut pending_idx = core::usize::MAX;
3888                 let mut htlc_value_msat = 0;
3889                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3890                         if htlc.htlc_id == htlc_id_arg {
3891                                 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
3892                                 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
3893                                         htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
3894                                 match htlc.state {
3895                                         InboundHTLCState::Committed => {},
3896                                         InboundHTLCState::LocalRemoved(ref reason) => {
3897                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3898                                                 } else {
3899                                                         log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
3900                                                         debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3901                                                 }
3902                                                 return UpdateFulfillFetch::DuplicateClaim {};
3903                                         },
3904                                         _ => {
3905                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3906                                                 // Don't return in release mode here so that we can update channel_monitor
3907                                         }
3908                                 }
3909                                 pending_idx = idx;
3910                                 htlc_value_msat = htlc.amount_msat;
3911                                 break;
3912                         }
3913                 }
3914                 if pending_idx == core::usize::MAX {
3915                         #[cfg(any(test, fuzzing))]
3916                         // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
3917                         // this is simply a duplicate claim, not previously failed and we lost funds.
3918                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3919                         return UpdateFulfillFetch::DuplicateClaim {};
3920                 }
3921
3922                 // Now update local state:
3923                 //
3924                 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
3925                 // can claim it even if the channel hits the chain before we see their next commitment.
3926                 self.context.latest_monitor_update_id += 1;
3927                 let monitor_update = ChannelMonitorUpdate {
3928                         update_id: self.context.latest_monitor_update_id,
3929                         counterparty_node_id: Some(self.context.counterparty_node_id),
3930                         updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
3931                                 payment_preimage: payment_preimage_arg.clone(),
3932                         }],
3933                         channel_id: Some(self.context.channel_id()),
3934                 };
3935
3936                 if !self.context.channel_state.can_generate_new_commitment() {
3937                         // Note that this condition is the same as the assertion in
3938                         // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
3939                         // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
3940                         // do not get into this branch.
3941                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
3942                                 match pending_update {
3943                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
3944                                                 if htlc_id_arg == htlc_id {
3945                                                         // Make sure we don't leave latest_monitor_update_id incremented here:
3946                                                         self.context.latest_monitor_update_id -= 1;
3947                                                         #[cfg(any(test, fuzzing))]
3948                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3949                                                         return UpdateFulfillFetch::DuplicateClaim {};
3950                                                 }
3951                                         },
3952                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
3953                                                 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
3954                                         {
3955                                                 if htlc_id_arg == htlc_id {
3956                                                         log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
3957                                                         // TODO: We may actually be able to switch to a fulfill here, though it's
3958                                                         // rare enough it may not be worth the complexity burden.
3959                                                         debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3960                                                         return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3961                                                 }
3962                                         },
3963                                         _ => {}
3964                                 }
3965                         }
3966                         log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
3967                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
3968                                 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
3969                         });
3970                         #[cfg(any(test, fuzzing))]
3971                         self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3972                         return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3973                 }
3974                 #[cfg(any(test, fuzzing))]
3975                 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3976
3977                 {
3978                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
3979                         if let InboundHTLCState::Committed = htlc.state {
3980                         } else {
3981                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3982                                 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3983                         }
3984                         log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
3985                         htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
3986                 }
3987
3988                 UpdateFulfillFetch::NewClaim {
3989                         monitor_update,
3990                         htlc_value_msat,
3991                         msg: Some(msgs::UpdateFulfillHTLC {
3992                                 channel_id: self.context.channel_id(),
3993                                 htlc_id: htlc_id_arg,
3994                                 payment_preimage: payment_preimage_arg,
3995                         }),
3996                 }
3997         }
3998
3999         pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
4000                 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
4001                 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
4002                         UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
4003                                 // Even if we aren't supposed to let new monitor updates with commitment state
4004                                 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
4005                                 // matter what. Sadly, to push a new monitor update which flies before others
4006                                 // already queued, we have to insert it into the pending queue and update the
4007                                 // update_ids of all the following monitors.
4008                                 if release_cs_monitor && msg.is_some() {
4009                                         let mut additional_update = self.build_commitment_no_status_check(logger);
4010                                         // build_commitment_no_status_check may bump latest_monitor_update_id but we want
4011                                         // update_ids to be strictly increasing by one, so decrement it here.
4012                                         self.context.latest_monitor_update_id = monitor_update.update_id;
4013                                         monitor_update.updates.append(&mut additional_update.updates);
4014                                 } else {
4015                                         let new_mon_id = self.context.blocked_monitor_updates.get(0)
4016                                                 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
4017                                         monitor_update.update_id = new_mon_id;
4018                                         for held_update in self.context.blocked_monitor_updates.iter_mut() {
4019                                                 held_update.update.update_id += 1;
4020                                         }
4021                                         if msg.is_some() {
4022                                                 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
4023                                                 let update = self.build_commitment_no_status_check(logger);
4024                                                 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4025                                                         update,
4026                                                 });
4027                                         }
4028                                 }
4029
4030                                 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
4031                                 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
4032                         },
4033                         UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
4034                 }
4035         }
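
        // In other words (illustrative summary, not part of upstream): when blocked monitor
        // updates exist, the new preimage update takes the first blocked update's update_id and
        // every blocked update is shifted up by one, keeping update_ids strictly increasing.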
4036
4037         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
4038         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
4039         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
4040         /// before we fail backwards.
4041         ///
4042         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(())`. Thus, this will always
4043         /// return `Ok(())` if preconditions are met. In any case, `Err`s will only be
4044         /// [`ChannelError::Ignore`].
4045         pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
4046         -> Result<(), ChannelError> where L::Target: Logger {
4047                 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
4048                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
4049         }
4050
4051         /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
4052         /// want to fail blinded HTLCs where we are not the intro node.
4053         ///
4054         /// See [`Self::queue_fail_htlc`] for more info.
4055         pub fn queue_fail_malformed_htlc<L: Deref>(
4056                 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
4057         ) -> Result<(), ChannelError> where L::Target: Logger {
4058                 self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
4059                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
4060         }
4061
4062         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
4063         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
4064         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
4065         /// before we fail backwards.
4066         ///
4067         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
4068         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
4069         /// [`ChannelError::Ignore`].
4070         fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
4071                 &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
4072                 logger: &L
4073         ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
4074                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4075                         panic!("Was asked to fail an HTLC when channel was not in an operational state");
4076                 }
4077
4078                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
4079                 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
4080                 // these, but for now we just have to treat them as normal.
4081
4082                 let mut pending_idx = core::usize::MAX;
4083                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
4084                         if htlc.htlc_id == htlc_id_arg {
4085                                 match htlc.state {
4086                                         InboundHTLCState::Committed => {},
4087                                         InboundHTLCState::LocalRemoved(ref reason) => {
4088                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
4089                                                 } else {
4090                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
4091                                                 }
4092                                                 return Ok(None);
4093                                         },
4094                                         _ => {
4095                                                 debug_assert!(false, "Have an inbound HTLC we tried to fail before it was fully committed to");
4096                                                 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
4097                                         }
4098                                 }
4099                                 pending_idx = idx;
4100                         }
4101                 }
4102                 if pending_idx == core::usize::MAX {
4103                         #[cfg(any(test, fuzzing))]
4104                         // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
4105                         // is simply a duplicate fail, not previously failed and we failed-back too early.
4106                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
4107                         return Ok(None);
4108                 }
4109
4110                 if !self.context.channel_state.can_generate_new_commitment() {
4111                         debug_assert!(force_holding_cell, "fail_htlc is only called with !force_holding_cell when emptying the holding cell, so we shouldn't end up back in it!");
4112                         force_holding_cell = true;
4113                 }
4114
4115                 // Now update local state:
4116                 if force_holding_cell {
4117                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
4118                                 match pending_update {
4119                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
4120                                                 if htlc_id_arg == htlc_id {
4121                                                         #[cfg(any(test, fuzzing))]
4122                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
4123                                                         return Ok(None);
4124                                                 }
4125                                         },
4126                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
4127                                                 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
4128                                         {
4129                                                 if htlc_id_arg == htlc_id {
4130                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
4131                                                         return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
4132                                                 }
4133                                         },
4134                                         _ => {}
4135                                 }
4136                         }
4137                         log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
4138                         self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
4139                         return Ok(None);
4140                 }
4141
4142                 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
4143                         E::Message::name(), &self.context.channel_id());
4144                 {
4145                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
4146                         htlc.state = err_contents.clone().to_inbound_htlc_state();
4147                 }
4148
4149                 Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
4150         }
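
        // For example, `queue_fail_htlc` instantiates `fail_htlc` with an `msgs::OnionErrorPacket`
        // (yielding an `update_fail_htlc` message), while `queue_fail_malformed_htlc` passes a
        // `(sha256_of_onion, failure_code)` tuple (yielding `update_fail_malformed_htlc`).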
4151
4152         // Message handlers:
4153         /// Updates the state of the channel to indicate that all channels in the batch have received
4154         /// funding_signed and persisted their monitors.
4155         /// The funding transaction is consequently allowed to be broadcast, and the channel can be
4156         /// treated as a non-batch channel going forward.
4157         pub fn set_batch_ready(&mut self) {
4158                 self.context.is_batch_funding = None;
4159                 self.context.channel_state.clear_waiting_for_batch();
4160         }
4161
4162         /// Unsets the existing funding information.
4163         ///
4164         /// This must only be used if the channel has not yet completed funding and has not been used.
4165         ///
4166         /// Further, the channel must be immediately shut down after this with a call to
4167         /// [`ChannelContext::force_shutdown`].
4168         pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
4169                 debug_assert!(matches!(
4170                         self.context.channel_state, ChannelState::AwaitingChannelReady(_)
4171                 ));
4172                 self.context.channel_transaction_parameters.funding_outpoint = None;
4173                 self.context.channel_id = temporary_channel_id;
4174         }
4175
4176         /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
4177         /// and the channel is now usable (and public), this may generate an announcement_signatures to
4178         /// reply with.
4179         pub fn channel_ready<NS: Deref, L: Deref>(
4180                 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
4181                 user_config: &UserConfig, best_block: &BestBlock, logger: &L
4182         ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
4183         where
4184                 NS::Target: NodeSigner,
4185                 L::Target: Logger
4186         {
4187                 if self.context.channel_state.is_peer_disconnected() {
4188                         self.context.workaround_lnd_bug_4006 = Some(msg.clone());
4189                         return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
4190                 }
4191
4192                 if let Some(scid_alias) = msg.short_channel_id_alias {
4193                         if Some(scid_alias) != self.context.short_channel_id {
4194                                 // The scid alias provided can be used to route payments *from* our counterparty,
4195                                 // i.e. can be used for inbound payments and provided in invoices, but is not used
4196                                 // when routing outbound payments.
4197                                 self.context.latest_inbound_scid_alias = Some(scid_alias);
4198                         }
4199                 }
4200
4201                 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
4202                 // batch, but we can receive channel_ready messages.
4203                 let mut check_reconnection = false;
4204                 match &self.context.channel_state {
4205                         ChannelState::AwaitingChannelReady(flags) => {
4206                                 let flags = flags.clone().clear(FundedStateFlags::ALL.into());
4207                                 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
4208                                 if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
4209                                         // If we reconnected before sending our `channel_ready` they may still resend theirs.
4210                                         check_reconnection = true;
4211                                 } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
4212                                         self.context.channel_state.set_their_channel_ready();
4213                                 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
4214                                         self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
4215                                         self.context.update_time_counter += 1;
4216                                 } else {
4217                                         // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
4218                                         debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
4219                                 }
4220                         }
4221                         // If we reconnected before sending our `channel_ready` they may still resend theirs.
4222                         ChannelState::ChannelReady(_) => check_reconnection = true,
4223                         _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
4224                 }
4225                 if check_reconnection {
4226                         // They probably disconnected/reconnected and re-sent the channel_ready, which is
4227                         // required, or they're sending a fresh SCID alias.
4228                         let expected_point =
4229                                 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4230                                         // If they haven't ever sent an updated point, the point they send should match
4231                                         // the current one.
4232                                         self.context.counterparty_cur_commitment_point
4233                                 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
4234                                         // If we've advanced the commitment number once, the second commitment point is
4235                                         // at `counterparty_prev_commitment_point`, which is not yet revoked.
4236                                         debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
4237                                         self.context.counterparty_prev_commitment_point
4238                                 } else {
4239                                         // If they have sent updated points, channel_ready is always supposed to match
4240                                         // their "first" point, which we re-derive here.
4241                                         Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
4242                                                         &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
4243                                                 ).expect("We already advanced, so previous secret keys should have been validated already")))
4244                                 };
4245                         if expected_point != Some(msg.next_per_commitment_point) {
4246                                 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
4247                         }
4248                         return Ok(None);
4249                 }
4250
4251                 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4252                 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4253
4254                 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
4255
4256                 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger))
4257         }
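
        // A standalone sketch (not part of upstream) of the re-derivation used above: once the
        // counterparty has released a per-commitment secret, the matching per-commitment point is
        // simply the secp256k1 public key of that secret.
        #[cfg(test)]
        fn example_point_from_released_secret(secret: &[u8; 32]) -> PublicKey {
                let secp_ctx = Secp256k1::new();
                let sk = SecretKey::from_slice(&secret[..])
                        .expect("a released per-commitment secret must be a valid scalar");
                PublicKey::from_secret_key(&secp_ctx, &sk)
        }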
4258
4259         pub fn update_add_htlc<F: Deref>(
4260                 &mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
4261                 fee_estimator: &LowerBoundedFeeEstimator<F>,
4262         ) -> Result<(), ChannelError> where F::Target: FeeEstimator {
4263                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4264                         return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
4265                 }
4266                 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
4267                 if self.context.channel_state.is_remote_shutdown_sent() {
4268                         return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
4269                 }
4270                 if self.context.channel_state.is_peer_disconnected() {
4271                         return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
4272                 }
4273                 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
4274                         return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
4275                 }
4276                 if msg.amount_msat == 0 {
4277                         return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
4278                 }
4279                 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
4280                         return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
4281                 }
4282
4283                 let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
4284                 let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
4285                 if htlc_stats.pending_inbound_htlcs + 1 > self.context.holder_max_accepted_htlcs as usize {
4286                         return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
4287                 }
4288                 if htlc_stats.pending_inbound_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
4289                         return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
4290                 }
4291
4292                 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least
4293                 // meet the reserve_satoshis we told them to always maintain as a direct payment, so that they
4294                 // lose something if we punish them for broadcasting an old state).
4295                 // Note that we don't really care about having a small/no to_remote output in our local
4296                 // commitment transactions, as the purpose of the channel reserve is to ensure we can
4297                 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
4298                 // present in the next commitment transaction we send them (at least for fulfilled ones,
4299                 // failed ones won't modify value_to_self).
4300                 // Note that we will send HTLCs which another instance of rust-lightning would think
4301                 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
4302                 // Channel state once they will not be present in the next received commitment
4303                 // transaction).
4304                 let mut removed_outbound_total_msat = 0;
4305                 for ref htlc in self.context.pending_outbound_htlcs.iter() {
4306                         if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
4307                                 removed_outbound_total_msat += htlc.amount_msat;
4308                         } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
4309                                 removed_outbound_total_msat += htlc.amount_msat;
4310                         }
4311                 }
4312
4313                 let pending_value_to_self_msat =
4314                         self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat;
4315                 let pending_remote_value_msat =
4316                         self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
4317                 if pending_remote_value_msat < msg.amount_msat {
4318                         return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
4319                 }
4320
4321                 // Check that the remote can afford to pay for this HTLC on-chain at the current
4322                 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
4323                 {
4324                         let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
4325                                 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
4326                                 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
4327                         };
4328                         let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4329                                 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
4330                         } else {
4331                                 0
4332                         };
4333                         if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
4334                                 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
4335                         };
4336                         if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
4337                                 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
4338                         }
4339                 }
4340
4341                 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4342                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
4343                 } else {
4344                         0
4345                 };
4346                 if self.context.is_outbound() {
4347                         // Check that they won't violate our local required channel reserve by adding this HTLC.
4348                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
4349                         let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
4350                         if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
4351                                 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
4352                         }
4353                 }
4354                 if self.context.next_counterparty_htlc_id != msg.htlc_id {
4355                         return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
4356                 }
4357                 if msg.cltv_expiry >= 500000000 {
4358                         return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
4359                 }
4360
4361                 if self.context.channel_state.is_local_shutdown_sent() {
4362                         if let PendingHTLCStatus::Forward(_) = pending_forward_status {
4363                                 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
4364                         }
4365                 }
4366
4367                 // Now update local state:
4368                 self.context.next_counterparty_htlc_id += 1;
4369                 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
4370                         htlc_id: msg.htlc_id,
4371                         amount_msat: msg.amount_msat,
4372                         payment_hash: msg.payment_hash,
4373                         cltv_expiry: msg.cltv_expiry,
4374                         state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved {
4375                                 pending_htlc_status: pending_forward_status
4376                         }),
4377                 });
4378                 Ok(())
4379         }
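
        // A compact sketch (hypothetical helper, not part of upstream) of the affordability check
        // above: after subtracting the new HTLC, the commitment fee, and any anchor value, the
        // remote's remaining balance must still cover the reserve we selected for them.
        #[cfg(test)]
        fn example_remote_can_afford_htlc_msat(
                pending_remote_value_msat: u64, htlc_amount_msat: u64, remote_commit_tx_fee_msat: u64,
                anchor_outputs_value_msat: u64, holder_selected_reserve_msat: u64,
        ) -> bool {
                pending_remote_value_msat
                        .saturating_sub(htlc_amount_msat)
                        .saturating_sub(remote_commit_tx_fee_msat)
                        .saturating_sub(anchor_outputs_value_msat)
                        >= holder_selected_reserve_msat
        }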
4380
4381         /// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed message
4382         #[inline]
4383         fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
4384                 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
4385                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4386                         if htlc.htlc_id == htlc_id {
4387                                 let outcome = match check_preimage {
4388                                         None => fail_reason.into(),
4389                                         Some(payment_preimage) => {
4390                                                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
4391                                                 if payment_hash != htlc.payment_hash {
4392                                                         return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
4393                                                 }
4394                                                 OutboundHTLCOutcome::Success(Some(payment_preimage))
4395                                         }
4396                                 };
4397                                 match htlc.state {
4398                                         OutboundHTLCState::LocalAnnounced(_) =>
4399                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
4400                                         OutboundHTLCState::Committed => {
4401                                                 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
4402                                         },
4403                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
4404                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
4405                                 }
4406                                 return Ok(htlc);
4407                         }
4408                 }
4409                 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
4410         }
4411
4412         pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
4413                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4414                         return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
4415                 }
4416                 if self.context.channel_state.is_peer_disconnected() {
4417                         return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
4418                 }
4419
4420                 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
4421         }
4422
4423         pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
4424                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4425                         return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
4426                 }
4427                 if self.context.channel_state.is_peer_disconnected() {
4428                         return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
4429                 }
4430
4431                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
4432                 Ok(())
4433         }
4434
4435         pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
4436                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4437                         return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
4438                 }
4439                 if self.context.channel_state.is_peer_disconnected() {
4440                         return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
4441                 }
4442
4443                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
4444                 Ok(())
4445         }
4446
4447         pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
4448                 where L::Target: Logger
4449         {
4450                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4451                         return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
4452                 }
4453                 if self.context.channel_state.is_peer_disconnected() {
4454                         return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
4455                 }
4456                 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
4457                         return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
4458                 }
4459
4460                 let funding_script = self.context.get_funding_redeemscript();
4461
4462                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4463
4464                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
4465                 let commitment_txid = {
4466                         let trusted_tx = commitment_stats.tx.trust();
4467                         let bitcoin_tx = trusted_tx.built_transaction();
4468                         let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
4469
4470                         log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
4471                                 log_bytes!(msg.signature.serialize_compact()[..]),
4472                                 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
4473                                 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
4474                         if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
4475                                 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
4476                         }
4477                         bitcoin_tx.txid
4478                 };
4479                 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
4480
4481                 // If our counterparty updated the channel fee in this commitment transaction, check that
4482                 // they can actually afford the new fee now.
4483                 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
4484                         update_state == FeeUpdateState::RemoteAnnounced
4485                 } else { false };
4486                 if update_fee {
4487                         debug_assert!(!self.context.is_outbound());
4488                         let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
4489                         if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
4490                                 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
4491                         }
4492                 }
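                // Worked example of the check above (illustrative numbers, not from the
                // source): if holder_selected_channel_reserve_satoshis is 10_000 sat, the
                // reserve we require of the counterparty is 10_000_000 msat. With a total
                // commitment fee of 2_000 sat at the proposed feerate, the fee-paying
                // counterparty needs at least 12_000_000 msat to themselves:
                //
                //     let reserve_msat = 10_000u64 * 1000;      // holder-selected reserve
                //     let total_fee_msat = 2_000u64 * 1000;     // commit tx fee at new feerate
                //     let remote_balance_msat = 11_500_000u64;  // hypothetical remote balance
                //     // 11_500_000 < 12_000_000, so this fee update would close the channel
                //     assert!(remote_balance_msat < total_fee_msat + reserve_msat);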
4493                 #[cfg(any(test, fuzzing))]
4494                 {
4495                         if self.context.is_outbound() {
4496                                 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
4497                                 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
4498                                 if let Some(info) = projected_commit_tx_info {
4499                                         let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
4500                                                 + self.context.holding_cell_htlc_updates.len();
4501                                         if info.total_pending_htlcs == total_pending_htlcs
4502                                                 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
4503                                                 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
4504                                                 && info.feerate == self.context.feerate_per_kw {
4505                                                         assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
4506                                                 }
4507                                 }
4508                         }
4509                 }
4510
4511                 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
4512                         return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
4513                 }
4514
4515                 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
4516                 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
4517                 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
4518                 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
4519                 // backwards compatibility, we never use it in production. To provide test coverage, here,
4520                 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
4521                 #[allow(unused_assignments, unused_mut)]
4522                 let mut separate_nondust_htlc_sources = false;
4523                 #[cfg(all(feature = "std", any(test, fuzzing)))] {
4524                         use core::hash::{BuildHasher, Hasher};
4525                         // Get a random value using the only std API to do so - the DefaultHasher
4526                         let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
4527                         separate_nondust_htlc_sources = rand_val % 2 == 0;
4528                 }
4529
4530                 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
4531                 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
4532                 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
4533                         if let Some(_) = htlc.transaction_output_index {
4534                                 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
4535                                         self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
4536                                         &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
4537
4538                                 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
4539                                 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
4540                                 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
4541                                 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
4542                                         log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
4543                                         encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
4544                                 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
4545                                         return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
4546                                 }
4547                                 if !separate_nondust_htlc_sources {
4548                                         htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
4549                                 }
4550                         } else {
4551                                 htlcs_and_sigs.push((htlc, None, source_opt.take()));
4552                         }
4553                         if separate_nondust_htlc_sources {
4554                                 if let Some(source) = source_opt.take() {
4555                                         nondust_htlc_sources.push(source);
4556                                 }
4557                         }
4558                         debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
4559                 }
4560
4561                 let holder_commitment_tx = HolderCommitmentTransaction::new(
4562                         commitment_stats.tx,
4563                         msg.signature,
4564                         msg.htlc_signatures.clone(),
4565                         &self.context.get_holder_pubkeys().funding_pubkey,
4566                         self.context.counterparty_funding_pubkey()
4567                 );
4568
4569                 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
4570                         .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
4571
4572                 // Update state now that we've passed all the can-fail calls...
4573                 let mut need_commitment = false;
4574                 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
4575                         if *update_state == FeeUpdateState::RemoteAnnounced {
4576                                 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
4577                                 need_commitment = true;
4578                         }
4579                 }
4580
4581                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
4582                         let htlc_resolution = if let &InboundHTLCState::RemoteAnnounced(ref resolution) = &htlc.state {
4583                                 Some(resolution.clone())
4584                         } else { None };
4585                         if let Some(htlc_resolution) = htlc_resolution {
4586                                 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
4587                                         &htlc.payment_hash, &self.context.channel_id);
4588                                 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution);
4589                                 need_commitment = true;
4590                         }
4591                 }
4592                 let mut claimed_htlcs = Vec::new();
4593                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4594                         if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
4595                                 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
4596                                         &htlc.payment_hash, &self.context.channel_id);
4597                                 // Grab the preimage, if it exists, instead of cloning
4598                                 let mut reason = OutboundHTLCOutcome::Success(None);
4599                                 mem::swap(outcome, &mut reason);
4600                                 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
4601                                         // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
4602                                         // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
4603                                         // have a `Success(None)` reason. In this case we could forget some HTLC
4604                                         // claims, but such an upgrade is unlikely and including claimed HTLCs here
4605                                         // fixes a bug which the user was exposed to on 0.0.104 when they started the
4606                                         // claim anyway.
4607                                         claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
4608                                 }
4609                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
4610                                 need_commitment = true;
4611                         }
4612                 }
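                // A note on the `mem::swap` above (our gloss): swapping a placeholder
                // `Success(None)` into the enum takes ownership of the real outcome (and its
                // preimage) through a `&mut` reference without cloning. The same pattern in
                // miniature:
                //
                //     use core::mem;
                //     let mut slot = Some(String::from("preimage"));
                //     let mut taken = None;
                //     mem::swap(&mut slot, &mut taken);
                //     assert!(slot.is_none() && taken.is_some());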
4613
4614                 self.context.latest_monitor_update_id += 1;
4615                 let mut monitor_update = ChannelMonitorUpdate {
4616                         update_id: self.context.latest_monitor_update_id,
4617                         counterparty_node_id: Some(self.context.counterparty_node_id),
4618                         updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
4619                                 commitment_tx: holder_commitment_tx,
4620                                 htlc_outputs: htlcs_and_sigs,
4621                                 claimed_htlcs,
4622                                 nondust_htlc_sources,
4623                         }],
4624                         channel_id: Some(self.context.channel_id()),
4625                 };
4626
4627                 self.context.cur_holder_commitment_transaction_number -= 1;
4628                 self.context.expecting_peer_commitment_signed = false;
4629                 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
4630                 // build_commitment_no_status_check() next which will reset this to RAAFirst.
4631                 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
4632
4633                 if self.context.channel_state.is_monitor_update_in_progress() {
4634                         // In case we initially failed monitor updating without requiring a response, we need
4635                         // to make sure the RAA gets sent first.
4636                         self.context.monitor_pending_revoke_and_ack = true;
4637                         if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4638                                 // If we were going to send a commitment_signed after the RAA, go ahead and do all
4639                                 // the corresponding HTLC status updates so that
4640                                 // get_last_commitment_update_for_send includes the right HTLCs.
4641                                 self.context.monitor_pending_commitment_signed = true;
4642                                 let mut additional_update = self.build_commitment_no_status_check(logger);
4643                                 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4644                                 // strictly increasing by one, so decrement it here.
4645                                 self.context.latest_monitor_update_id = monitor_update.update_id;
4646                                 monitor_update.updates.append(&mut additional_update.updates);
4647                         }
4648                         log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
4649                                 &self.context.channel_id);
4650                         return Ok(self.push_ret_blockable_mon_update(monitor_update));
4651                 }
4652
4653                 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4654                         // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
4655                         // we'll send one right away when we get the revoke_and_ack when we
4656                         // free_holding_cell_htlcs().
4657                         let mut additional_update = self.build_commitment_no_status_check(logger);
4658                         // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
4659                         // strictly increasing by one, so decrement it here.
4660                         self.context.latest_monitor_update_id = monitor_update.update_id;
4661                         monitor_update.updates.append(&mut additional_update.updates);
4662                         true
4663                 } else { false };
4664
4665                 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
4666                         &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
4667                 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
4668                 return Ok(self.push_ret_blockable_mon_update(monitor_update));
4669         }
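        // Summary of the exchange implemented above (per BOLT 2; phrasing ours, not an
        // upstream comment): on receiving a valid commitment_signed we (a) persist the new
        // holder commitment via a ChannelMonitorUpdate, (b) reply with revoke_and_ack
        // revoking the previous commitment, and (c) if any HTLC or fee state advanced,
        // follow up with our own commitment_signed. As a rough message trace:
        //
        //     counterparty -> us: commitment_signed
        //     us -> counterparty: revoke_and_ack
        //     us -> counterparty: commitment_signed   (only if need_commitment_signed)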
4670
4671         /// Public version of the below, checking relevant preconditions first.
4672         /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
4673         /// returns `(None, Vec::new())`.
4674         pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
4675                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4676         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
4677         where F::Target: FeeEstimator, L::Target: Logger
4678         {
4679                 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
4680                         self.free_holding_cell_htlcs(fee_estimator, logger)
4681                 } else { (None, Vec::new()) }
4682         }
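        // Illustrative caller-side usage (hypothetical; `channel`, `fee_estimator` and
        // `logger` are assumed to be in scope - upstream drives this from `ChannelManager`):
        //
        //     let (monitor_update_opt, failed_htlcs) =
        //         channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
        //     if let Some(monitor_update) = monitor_update_opt {
        //         // persist the monitor update, then fail `failed_htlcs` backwards
        //     }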
4683
4684         /// Frees any pending commitment updates in the holding cell, generating the relevant messages
4685         /// for our counterparty.
4686         fn free_holding_cell_htlcs<F: Deref, L: Deref>(
4687                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4688         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
4689         where F::Target: FeeEstimator, L::Target: Logger
4690         {
4691                 assert!(!self.context.channel_state.is_monitor_update_in_progress());
4692                 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
4693                         log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
4694                                 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
4695
4696                         let mut monitor_update = ChannelMonitorUpdate {
4697                                 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
4698                                 counterparty_node_id: Some(self.context.counterparty_node_id),
4699                                 updates: Vec::new(),
4700                                 channel_id: Some(self.context.channel_id()),
4701                         };
4702
4703                         let mut htlc_updates = Vec::new();
4704                         mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
4705                         let mut update_add_count = 0;
4706                         let mut update_fulfill_count = 0;
4707                         let mut update_fail_count = 0;
4708                         let mut htlcs_to_fail = Vec::new();
4709                         for htlc_update in htlc_updates.drain(..) {
4710                                 // Note that this *can* fail, though it should be due to rather-rare conditions on
4711                                 // fee races with adding too many outputs which push our total payments just over
4712                                 // the limit. In case it's less rare than I anticipate, we may want to revisit
4713                                 // handling this case better and maybe fulfilling some of the HTLCs while attempting
4714                                 // to rebalance channels.
4715                                 let fail_htlc_res = match &htlc_update {
4716                                         &HTLCUpdateAwaitingACK::AddHTLC {
4717                                                 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
4718                                                 skimmed_fee_msat, blinding_point, ..
4719                                         } => {
4720                                                 match self.send_htlc(
4721                                                         amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
4722                                                         false, skimmed_fee_msat, blinding_point, fee_estimator, logger
4723                                                 ) {
4724                                                         Ok(_) => update_add_count += 1,
4725                                                         Err(e) => {
4726                                                                 match e {
4727                                                                         ChannelError::Ignore(ref msg) => {
4728                                                                                 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
4729                                                                                 // If we fail to send here, then this HTLC should
4730                                                                                 // be failed backwards: a failure to send indicates
4731                                                                                 // that this HTLC may keep being put back
4732                                                                                 // into the holding cell without ever being
4733                                                                                 // successfully forwarded/failed/fulfilled, causing
4734                                                                                 // our counterparty to eventually close on us.
4735                                                                                 htlcs_to_fail.push((source.clone(), *payment_hash));
4736                                                                         },
4737                                                                         _ => {
4738                                                                                 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
4739                                                                         },
4740                                                                 }
4741                                                         }
4742                                                 }
4743                                                 None
4744                                         },
4745                                         &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
4746                                                 // If an HTLC claim was previously added to the holding cell (via
4747                                                 // `get_update_fulfill_htlc`), then generating the claim message itself must
4748                                                 // not fail - any in-between attempts to claim the HTLC will have resulted
4749                                                 // in it hitting the holding cell again, and we cannot change the state of a
4750                                                 // holding cell HTLC from fulfill to anything else.
4751                                                 let mut additional_monitor_update =
4752                                                         if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
4753                                                                 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
4754                                                         { monitor_update } else { unreachable!() };
4755                                                 update_fulfill_count += 1;
4756                                                 monitor_update.updates.append(&mut additional_monitor_update.updates);
4757                                                 None
4758                                         },
4759                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
4760                                                 Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
4761                                                  .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
4762                                         },
4763                                         &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
4764                                                 Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
4765                                                  .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
4766                                         }
4767                                 };
4768                                 if let Some(res) = fail_htlc_res {
4769                                         match res {
4770                                                 Ok(fail_msg_opt) => {
4771                                                         // If an HTLC failure was previously added to the holding cell (via
4772                                                         // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
4773                                                         // not fail - we should never end up in a state where we double-fail
4774                                                         // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
4775                                                         // for a full revocation before failing.
4776                                                         debug_assert!(fail_msg_opt.is_some());
4777                                                         update_fail_count += 1;
4778                                                 },
4779                                                 Err(ChannelError::Ignore(_)) => {},
4780                                                 Err(_) => {
4781                                                         panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
4782                                                 },
4783                                         }
4784                                 }
4785                         }
4786                         if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
4787                                 return (None, htlcs_to_fail);
4788                         }
4789                         let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
4790                                 self.send_update_fee(feerate, false, fee_estimator, logger)
4791                         } else {
4792                                 None
4793                         };
4794
4795                         let mut additional_update = self.build_commitment_no_status_check(logger);
4796                         // build_commitment_no_status_check and get_update_fulfill_htlc may bump latest_monitor_id
4797                         // but we want them to be strictly increasing by one, so reset it here.
4798                         self.context.latest_monitor_update_id = monitor_update.update_id;
4799                         monitor_update.updates.append(&mut additional_update.updates);
4800
4801                         log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
4802                                 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
4803                                 update_add_count, update_fulfill_count, update_fail_count);
4804
4805                         self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
4806                         (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
4807                 } else {
4808                         (None, Vec::new())
4809                 }
4810         }
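        // A note on the recurring "strictly increasing by one" bookkeeping above (our gloss,
        // not an upstream comment): helpers such as `build_commitment_no_status_check` and
        // `get_update_fulfill_htlc` each bump `latest_monitor_update_id`, but their steps are
        // folded into the single `ChannelMonitorUpdate` returned here, so the counter is
        // pulled back to that update's id to keep monitor update ids gapless:
        //
        //     self.context.latest_monitor_update_id = monitor_update.update_id;
        //     monitor_update.updates.append(&mut additional_update.updates);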
4811
4812         /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
4813         /// commitment_signed message here in case we had pending outbound HTLCs to add which were
4814         /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
4815         /// generating an appropriate error *after* the channel state has been updated based on the
4816         /// revoke_and_ack message.
4817         pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
4818                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
4819         ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
4820         where F::Target: FeeEstimator, L::Target: Logger,
4821         {
4822                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4823                         return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
4824                 }
4825                 if self.context.channel_state.is_peer_disconnected() {
4826                         return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
4827                 }
4828                 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
4829                         return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
4830                 }
4831
4832                 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
4833
4834                 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
4835                         if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
4836                                 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
4837                         }
4838                 }
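                // What the check above enforces (sketch; BOLT 3 is authoritative): each remote
                // commitment is tied to a per-commitment point P = s*G, and revoking it means
                // revealing the scalar s. Recomputing the point from the revealed secret and
                // comparing it to the point the peer previously gave us is the entire
                // verification (identifiers here are hypothetical):
                //
                //     let recomputed = PublicKey::from_secret_key(&secp_ctx, &revealed_secret);
                //     assert_eq!(recomputed, previously_received_commitment_point);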
4839
4840                 if !self.context.channel_state.is_awaiting_remote_revoke() {
4841                         // Our counterparty seems to have burned their coins to us (by revoking a state when we
4842                         // haven't given them a new commitment transaction to broadcast). We should probably
4843                         // take advantage of this by updating our channel monitor, sending them an error, and
4844                         // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
4845                         // lot of work, and there's some chance this is all a misunderstanding anyway.
4846                         // We have to do *something*, though, since our signer may get mad at us for otherwise
4847                         // jumping a remote commitment number, so best to just force-close and move on.
4848                         return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
4849                 }
4850
4851                 #[cfg(any(test, fuzzing))]
4852                 {
4853                         *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
4854                         *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
4855                 }
4856
4857                 match &self.context.holder_signer {
4858                         ChannelSignerType::Ecdsa(ecdsa) => {
4859                                 ecdsa.validate_counterparty_revocation(
4860                                         self.context.cur_counterparty_commitment_transaction_number + 1,
4861                                         &secret
4862                                 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
4863                         },
4864                         // TODO (taproot|arik)
4865                         #[cfg(taproot)]
4866                         _ => todo!()
4867                 };
4868
4869                 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
4870                         .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
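                // On `provide_secret` (sketch of the scheme, not upstream docs): BOLT 3 derives
                // all per-commitment secrets from one seed, so `CounterpartyCommitmentSecrets`
                // stores every revealed secret in just 49 (secret, index) slots and re-derives
                // older ones on demand. The call errors if the newly revealed secret cannot
                // reproduce the secrets we already hold, which is why a mismatch closes the
                // channel here.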
4871                 self.context.latest_monitor_update_id += 1;
4872                 let mut monitor_update = ChannelMonitorUpdate {
4873                         update_id: self.context.latest_monitor_update_id,
4874                         counterparty_node_id: Some(self.context.counterparty_node_id),
4875                         updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
4876                                 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
4877                                 secret: msg.per_commitment_secret,
4878                         }],
4879                         channel_id: Some(self.context.channel_id()),
4880                 };
4881
4882                 // Update state now that we've passed all the can-fail calls...
4883                 // (note that we may still fail to generate the new commitment_signed message, but that's
4884                 // OK, we step the channel here and *then* if the new generation fails we can fail the
4885                 // channel based on that, but stepping stuff here should be safe either way.)
4886                 self.context.channel_state.clear_awaiting_remote_revoke();
4887                 self.context.sent_message_awaiting_response = None;
4888                 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4889                 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4890                 self.context.cur_counterparty_commitment_transaction_number -= 1;
4891
4892                 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4893                         self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
4894                 }
4895
4896                 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
4897                 let mut to_forward_infos = Vec::new();
4898                 let mut pending_update_adds = Vec::new();
4899                 let mut revoked_htlcs = Vec::new();
4900                 let mut finalized_claimed_htlcs = Vec::new();
4901                 let mut update_fail_htlcs = Vec::new();
4902                 let mut update_fail_malformed_htlcs = Vec::new();
4903                 let mut require_commitment = false;
4904                 let mut value_to_self_msat_diff: i64 = 0;
4905
4906                 {
4907                         // Take references explicitly so that we can hold multiple references to self.context.
4908                         let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
4909                         let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
4910                         let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
4911
4912                         // We really shouldn't have two passes here, but retain gives a non-mutable ref (Rust bug)
4913                         pending_inbound_htlcs.retain(|htlc| {
4914                                 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4915                                         log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
4916                                         if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
4917                                                 value_to_self_msat_diff += htlc.amount_msat as i64;
4918                                         }
4919                                         *expecting_peer_commitment_signed = true;
4920                                         false
4921                                 } else { true }
4922                         });
4923                         pending_outbound_htlcs.retain(|htlc| {
4924                                 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
4925                                         log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
4926                                         if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
4927                                                 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
4928                                         } else {
4929                                                 finalized_claimed_htlcs.push(htlc.source.clone());
4930                                                 // They fulfilled, so we sent them money
4931                                                 value_to_self_msat_diff -= htlc.amount_msat as i64;
4932                                         }
4933                                         false
4934                                 } else { true }
4935                         });
4936                         for htlc in pending_inbound_htlcs.iter_mut() {
4937                                 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
4938                                         true
4939                                 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
4940                                         true
4941                                 } else { false };
4942                                 if swap {
4943                                         let mut state = InboundHTLCState::Committed;
4944                                         mem::swap(&mut state, &mut htlc.state);
4945
4946                                         if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state {
4947                                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
4948                                                 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution);
4949                                                 require_commitment = true;
4950                                         } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) = state {
4951                                                 match resolution {
4952                                                         InboundHTLCResolution::Resolved { pending_htlc_status } =>
4953                                                                 match pending_htlc_status {
4954                                                                         PendingHTLCStatus::Fail(fail_msg) => {
4955                                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
4956                                                                                 require_commitment = true;
4957                                                                                 match fail_msg {
4958                                                                                         HTLCFailureMsg::Relay(msg) => {
4959                                                                                                 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
4960                                                                                                 update_fail_htlcs.push(msg)
4961                                                                                         },
4962                                                                                         HTLCFailureMsg::Malformed(msg) => {
4963                                                                                                 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
4964                                                                                                 update_fail_malformed_htlcs.push(msg)
4965                                                                                         },
4966                                                                                 }
4967                                                                         },
4968                                                                         PendingHTLCStatus::Forward(forward_info) => {
4969                                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash);
4970                                                                                 to_forward_infos.push((forward_info, htlc.htlc_id));
4971                                                                                 htlc.state = InboundHTLCState::Committed;
4972                                                                         }
4973                                                                 }
4974                                                         InboundHTLCResolution::Pending { update_add_htlc } => {
4975                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
4976                                                                 pending_update_adds.push(update_add_htlc);
4977                                                                 htlc.state = InboundHTLCState::Committed;
4978                                                         }
4979                                                 }
4980                                         }
4981                                 }
4982                         }
4983                         for htlc in pending_outbound_htlcs.iter_mut() {
4984                                 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
4985                                         log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
4986                                         htlc.state = OutboundHTLCState::Committed;
4987                                         *expecting_peer_commitment_signed = true;
4988                                 }
4989                                 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
4990                                         log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
4991                                         // Grab the preimage, if it exists, instead of cloning
4992                                         let mut reason = OutboundHTLCOutcome::Success(None);
4993                                         mem::swap(outcome, &mut reason);
4994                                         htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
4995                                         require_commitment = true;
4996                                 }
4997                         }
4998                 }
4999                 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
5000
5001                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
5002                         match update_state {
5003                                 FeeUpdateState::Outbound => {
5004                                         debug_assert!(self.context.is_outbound());
5005                                         log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
5006                                         self.context.feerate_per_kw = feerate;
5007                                         self.context.pending_update_fee = None;
5008                                         self.context.expecting_peer_commitment_signed = true;
5009                                 },
5010                                 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
5011                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
5012                                         debug_assert!(!self.context.is_outbound());
5013                                         log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
5014                                         require_commitment = true;
5015                                         self.context.feerate_per_kw = feerate;
5016                                         self.context.pending_update_fee = None;
5017                                 },
5018                         }
5019                 }
5020
5021                 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
5022                 let release_state_str =
5023                         if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
5024                 macro_rules! return_with_htlcs_to_fail {
5025                         ($htlcs_to_fail: expr) => {
5026                                 if !release_monitor {
5027                                         self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5028                                                 update: monitor_update,
5029                                         });
5030                                         return Ok(($htlcs_to_fail, None));
5031                                 } else {
5032                                         return Ok(($htlcs_to_fail, Some(monitor_update)));
5033                                 }
5034                         }
5035                 }
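                // Why a local macro rather than a closure (our note, not upstream): each
                // expansion both moves `monitor_update` and does an early `return` from
                // `revoke_and_ack` itself - a closure could do neither from multiple call
                // sites without fighting the borrow checker.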
5036
5037                 self.context.monitor_pending_update_adds.append(&mut pending_update_adds);
5038
5039                 if self.context.channel_state.is_monitor_update_in_progress() {
5040                         // We can't actually generate a new commitment transaction (including by freeing holding
5041                         // cells) while we can't update the monitor, so we just return what we have.
5042                         if require_commitment {
5043                                 self.context.monitor_pending_commitment_signed = true;
5044                                 // When the monitor updating is restored we'll call
5045                                 // get_last_commitment_update_for_send(), which does not update state, but we're
5046                                 // definitely now awaiting a remote revoke before we can step forward any more, so
5047                                 // set it here.
5048                                 let mut additional_update = self.build_commitment_no_status_check(logger);
5049                                 // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
5050                                 // strictly increasing by one, so decrement it here.
5051                                 self.context.latest_monitor_update_id = monitor_update.update_id;
5052                                 monitor_update.updates.append(&mut additional_update.updates);
5053                         }
5054                         self.context.monitor_pending_forwards.append(&mut to_forward_infos);
5055                         self.context.monitor_pending_failures.append(&mut revoked_htlcs);
5056                         self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
5057                         log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
5058                         return_with_htlcs_to_fail!(Vec::new());
5059                 }
5060
5061                 match self.free_holding_cell_htlcs(fee_estimator, logger) {
5062                         (Some(mut additional_update), htlcs_to_fail) => {
5063                                 // free_holding_cell_htlcs may bump latest_monitor_id multiple times but we want them to be
5064                                 // strictly increasing by one, so reset it here.
5065                                 self.context.latest_monitor_update_id = monitor_update.update_id;
5066                                 monitor_update.updates.append(&mut additional_update.updates);
5067
5068                                 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
5069                                         &self.context.channel_id(), release_state_str);
5070
5071                                 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
5072                                 return_with_htlcs_to_fail!(htlcs_to_fail);
5073                         },
5074                         (None, htlcs_to_fail) => {
5075                                 if require_commitment {
5076                                         let mut additional_update = self.build_commitment_no_status_check(logger);
5077
5078                                         // build_commitment_no_status_check may bump latest_monitor_id but we want them to be
5079                                         // strictly increasing by one, so decrement it here.
5080                                         self.context.latest_monitor_update_id = monitor_update.update_id;
5081                                         monitor_update.updates.append(&mut additional_update.updates);
5082
5083                                         log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
5084                                                 &self.context.channel_id(),
5085                                                 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
5086                                                 release_state_str);
5087
5088                                         self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
5089                                         return_with_htlcs_to_fail!(htlcs_to_fail);
5090                                 } else {
5091                                         log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
5092                                                 &self.context.channel_id(), release_state_str);
5093
5094                                         self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
5095                                         return_with_htlcs_to_fail!(htlcs_to_fail);
5096                                 }
5097                         }
5098                 }
5099         }
5100
5101         /// Queues up an outbound update fee by placing it in the holding cell. You should call
5102         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
5103         /// commitment update.
5104         pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
5105                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
5106         where F::Target: FeeEstimator, L::Target: Logger
5107         {
5108                 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
5109                 assert!(msg_opt.is_none(), "We forced holding cell?");
5110         }
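        // Illustrative usage (hypothetical; identifiers assumed in scope): queueing only
        // stages the fee update, so a follow-up call emits the actual commitment update:
        //
        //     channel.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
        //     let (monitor_update_opt, _failed_htlcs) =
        //         channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);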
5111
5112         /// Adds a pending update to this channel. See the doc for send_htlc for
5113         /// further details on when the return value may be `None`.
5114         /// If our balance is too low to cover the cost of the next commitment transaction at the
5115         /// new feerate, the update is cancelled.
5116         ///
5117         /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
5118         /// [`Channel`] if `force_holding_cell` is false.
5119         fn send_update_fee<F: Deref, L: Deref>(
5120                 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
5121                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
5122         ) -> Option<msgs::UpdateFee>
5123         where F::Target: FeeEstimator, L::Target: Logger
5124         {
5125                 if !self.context.is_outbound() {
5126                         panic!("Cannot send fee from inbound channel");
5127                 }
5128                 if !self.context.is_usable() {
5129                         panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
5130                 }
5131                 if !self.context.is_live() {
5132                         panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
5133                 }
5134
5135                 // Before proposing a feerate update, check that we can actually afford the new fee.
5136                 let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
5137                 let htlc_stats = self.context.get_pending_htlc_stats(Some(feerate_per_kw), dust_exposure_limiting_feerate);
5138                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
5139                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
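		// Illustrative arithmetic with hypothetical numbers (not taken from this code): at a
		// proposed feerate of 2_500 sat/kWU, `commit_tx_fee_sat` prices a commitment carrying the
		// current non-dust HTLCs, any outbound holding-cell HTLCs, and
		// CONCURRENT_INBOUND_HTLC_FEE_BUFFER extra inbound slots; the `* 1000` then converts that
		// sat-denominated fee into msat so it can be compared against our msat balance (net of
		// the counterparty-selected reserve) below.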
		let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + htlc_stats.on_holder_tx_outbound_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
		let holder_balance_msat = commitment_stats.local_balance_msat - htlc_stats.outbound_holding_cell_msat;
		if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
			//TODO: auto-close after a number of failures?
			log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
			return None;
		}

		// Note that we evaluate the pending HTLC "preemptive" trimmed-to-dust thresholds at the
		// proposed `feerate_per_kw`.
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
		if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}
		if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
			log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
			return None;
		}

		if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
			force_holding_cell = true;
		}

		if force_holding_cell {
			self.context.holding_cell_update_fee = Some(feerate_per_kw);
			return None;
		}

		debug_assert!(self.context.pending_update_fee.is_none());
		self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));

		Some(msgs::UpdateFee {
			channel_id: self.context.channel_id,
			feerate_per_kw,
		})
	}

	/// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
	/// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
	/// resent.
	/// No further message handling calls may be made until a channel_reestablish dance has
	/// completed.
	/// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
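	///
	/// # Example
	///
	/// A sketch of the disconnect path, not compiled as a doctest; `channel` and `logger` are
	/// assumed to be in scope:
	///
	/// ```ignore
	/// if channel.remove_uncommitted_htlcs_and_mark_paused(&logger).is_err() {
	///     // The channel was still in a pre-funded state; per the note above,
	///     // ChannelContext::force_shutdown should be called immediately.
	/// }
	/// ```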
	pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
		if self.context.channel_state.is_pre_funded_state() {
			return Err(())
		}

		if self.context.channel_state.is_peer_disconnected() {
			// While the below code should be idempotent, it's simpler to just return early, as
			// redundant disconnect events can fire, though they should be rare.
			return Ok(());
		}

		if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
			self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
		}

		// Upon reconnect we have to start the closing_signed dance over, but shutdown messages
		// will be retransmitted.
		self.context.last_sent_closing_fee = None;
		self.context.pending_counterparty_closing_signed = None;
		self.context.closing_fee_limits = None;

		let mut inbound_drop_count = 0;
		self.context.pending_inbound_htlcs.retain(|htlc| {
			match htlc.state {
				InboundHTLCState::RemoteAnnounced(_) => {
					// They sent us an update_add_htlc but we never got the commitment_signed.
					// We'll tell them what commitment_signed we're expecting next and they'll drop
					// this HTLC accordingly
					inbound_drop_count += 1;
					false
				},
				InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
					// We received a commitment_signed updating this HTLC and (at least hopefully)
					// sent a revoke_and_ack (which we can re-transmit) and have heard nothing
					// in response to it yet, so don't touch it.
					true
				},
				InboundHTLCState::Committed => true,
				InboundHTLCState::LocalRemoved(_) => {
					// We (hopefully) sent a commitment_signed updating this HTLC (which we can
					// re-transmit if needed) and they may have even sent a revoke_and_ack back
					// (that we missed). Keep this around for now and if they tell us they missed
					// the commitment_signed we can re-transmit the update then.
					true
				},
			}
		});
		self.context.next_counterparty_htlc_id -= inbound_drop_count;

		if let Some((_, update_state)) = self.context.pending_update_fee {
			if update_state == FeeUpdateState::RemoteAnnounced {
				debug_assert!(!self.context.is_outbound());
				self.context.pending_update_fee = None;
			}
		}

		for htlc in self.context.pending_outbound_htlcs.iter_mut() {
			if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
				// They sent us an update to remove this but haven't yet sent the corresponding
				// commitment_signed, we need to move it back to Committed and they can re-send
				// the update upon reconnection.
				htlc.state = OutboundHTLCState::Committed;
			}
		}

		self.context.sent_message_awaiting_response = None;

		self.context.channel_state.set_peer_disconnected();
		log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
		Ok(())
	}

	/// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
	/// This must be called before we return the [`ChannelMonitorUpdate`] back to the
	/// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
	/// update completes (potentially immediately).
	/// The messages which were generated with the monitor update must *not* have been sent to the
	/// remote end, and must instead have been dropped. They will be regenerated when
	/// [`Self::monitor_updating_restored`] is called.
	///
	/// [`ChannelManager`]: super::channelmanager::ChannelManager
	/// [`chain::Watch`]: crate::chain::Watch
	/// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
	fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
		resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
		mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
		mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
	) {
		self.context.monitor_pending_revoke_and_ack |= resend_raa;
		self.context.monitor_pending_commitment_signed |= resend_commitment;
		self.context.monitor_pending_channel_ready |= resend_channel_ready;
		self.context.monitor_pending_forwards.append(&mut pending_forwards);
		self.context.monitor_pending_failures.append(&mut pending_fails);
		self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
		self.context.channel_state.set_monitor_update_in_progress();
	}

	/// Indicates that the latest ChannelMonitor update has been committed by the client
	/// successfully and we should restore normal operation. Returns messages which should be sent
	/// to the remote side.
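	///
	/// # Example
	///
	/// An illustrative sketch of the pause/restore flow, not compiled as a doctest; all names
	/// other than the method itself are assumed to be in scope:
	///
	/// ```ignore
	/// // After the persister reports that the in-progress ChannelMonitorUpdate completed:
	/// let updates = channel.monitor_updating_restored(
	///     &logger, &node_signer, chain_hash, &user_config, best_block_height,
	/// );
	/// // Any `updates.raa` and `updates.commitment_update` must be sent to the peer in
	/// // the order given by `updates.order`.
	/// ```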
	pub fn monitor_updating_restored<L: Deref, NS: Deref>(
		&mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
		user_config: &UserConfig, best_block_height: u32
	) -> MonitorRestoreUpdates
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		assert!(self.context.channel_state.is_monitor_update_in_progress());
		self.context.channel_state.clear_monitor_update_in_progress();

		// If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
		// (re-)broadcast the funding transaction as we may have declined to broadcast it when we
		// first received the funding_signed.
		let mut funding_broadcastable =
			if self.context.is_outbound() &&
				(matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
				matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
			{
				self.context.funding_transaction.take()
			} else { None };
		// That said, if the funding transaction is already confirmed (i.e. we're active with a
		// minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
		if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
			funding_broadcastable = None;
		}

		// We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
		// (and we assume the user never directly broadcasts the funding transaction and waits for
		// us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
		// * an inbound channel that failed to persist the monitor on funding_created and we got
		//   the funding transaction confirmed before the monitor was persisted, or
		// * a 0-conf channel where we intended to send the channel_ready before any broadcast at all.
		let channel_ready = if self.context.monitor_pending_channel_ready {
			assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
				"Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
			self.context.monitor_pending_channel_ready = false;
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);

		let mut accepted_htlcs = Vec::new();
		mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
		let mut failed_htlcs = Vec::new();
		mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
		let mut finalized_claimed_htlcs = Vec::new();
		mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
		let mut pending_update_adds = Vec::new();
		mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds);

		if self.context.channel_state.is_peer_disconnected() {
			self.context.monitor_pending_revoke_and_ack = false;
			self.context.monitor_pending_commitment_signed = false;
			return MonitorRestoreUpdates {
				raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
				accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds,
				funding_broadcastable, channel_ready, announcement_sigs
			};
		}

		let raa = if self.context.monitor_pending_revoke_and_ack {
			Some(self.get_last_revoke_and_ack())
		} else { None };
		let commitment_update = if self.context.monitor_pending_commitment_signed {
			self.get_last_commitment_update_for_send(logger).ok()
		} else { None };
		if commitment_update.is_some() {
			self.mark_awaiting_response();
		}

		self.context.monitor_pending_revoke_and_ack = false;
		self.context.monitor_pending_commitment_signed = false;
		let order = self.context.resend_order.clone();
		log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
			&self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
			if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
			match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
		MonitorRestoreUpdates {
			raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs,
			pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs
		}
	}

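	/// Handles an inbound `update_fee` message from the channel funder, recording the new feerate
	/// as pending until the accompanying `commitment_signed` is processed. Errors if we are the
	/// funder ourselves, if the peer owes us a `channel_reestablish`, or if the new feerate would
	/// push either side's dust exposure over our configured limit.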
	pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		if self.context.is_outbound() {
			return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
		}
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
		}
		Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;

		self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
		self.context.update_time_counter += 1;
		// Check that we won't be pushed over our dust exposure limit by the feerate increase.
		let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
		let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
		let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
		if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
			return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
				msg.feerate_per_kw, htlc_stats.on_holder_tx_dust_exposure_msat)));
		}
		if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
			return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
				msg.feerate_per_kw, htlc_stats.on_counterparty_tx_dust_exposure_msat)));
		}
		Ok(())
	}

	/// Indicates that the signer may have some signatures for us, so we should retry if we're
	/// blocked.
	#[cfg(async_signing)]
	pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
		let commitment_update = if self.context.signer_pending_commitment_update {
			self.get_last_commitment_update_for_send(logger).ok()
		} else { None };
		let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
			self.context.get_funding_signed_msg(logger).1
		} else { None };
		let channel_ready = if funding_signed.is_some() {
			self.check_get_channel_ready(0)
		} else { None };

		log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
			if commitment_update.is_some() { "a" } else { "no" },
			if funding_signed.is_some() { "a" } else { "no" },
			if channel_ready.is_some() { "a" } else { "no" });

		SignerResumeUpdates {
			commitment_update,
			funding_signed,
			channel_ready,
		}
	}

	fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
		let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
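		// Commitment transaction numbers count down from INITIAL_COMMITMENT_NUMBER, so the `+ 2`
		// below addresses a strictly older state than `cur_holder_commitment_transaction_number`:
		// the point we hand out is for the current number, while the released secret re-reveals
		// an older, already-revoked state (this method regenerates a previously-sent RAA).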
		let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
		msgs::RevokeAndACK {
			channel_id: self.context.channel_id,
			per_commitment_secret,
			next_per_commitment_point,
			#[cfg(taproot)]
			next_local_nonce: None,
		}
	}

	/// Gets the last commitment update for immediate sending to our peer.
	fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
		let mut update_add_htlcs = Vec::new();
		let mut update_fulfill_htlcs = Vec::new();
		let mut update_fail_htlcs = Vec::new();
		let mut update_fail_malformed_htlcs = Vec::new();

		for htlc in self.context.pending_outbound_htlcs.iter() {
			if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
				update_add_htlcs.push(msgs::UpdateAddHTLC {
					channel_id: self.context.channel_id(),
					htlc_id: htlc.htlc_id,
					amount_msat: htlc.amount_msat,
					payment_hash: htlc.payment_hash,
					cltv_expiry: htlc.cltv_expiry,
					onion_routing_packet: (**onion_packet).clone(),
					skimmed_fee_msat: htlc.skimmed_fee_msat,
					blinding_point: htlc.blinding_point,
				});
			}
		}

		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
				match reason {
					&InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
						update_fail_htlcs.push(msgs::UpdateFailHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							reason: err_packet.clone()
						});
					},
					&InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
						update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							sha256_of_onion: sha256_of_onion.clone(),
							failure_code: failure_code.clone(),
						});
					},
					&InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
						update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
							channel_id: self.context.channel_id(),
							htlc_id: htlc.htlc_id,
							payment_preimage: payment_preimage.clone(),
						});
					},
				}
			}
		}

		let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
			Some(msgs::UpdateFee {
				channel_id: self.context.channel_id(),
				feerate_per_kw: self.context.pending_update_fee.unwrap().0,
			})
		} else { None };

		log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
				&self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
				update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
		let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
			if self.context.signer_pending_commitment_update {
				log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
				self.context.signer_pending_commitment_update = false;
			}
			update
		} else {
			#[cfg(not(async_signing))] {
				panic!("Failed to get signature for new commitment state");
			}
			#[cfg(async_signing)] {
				if !self.context.signer_pending_commitment_update {
					log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
					self.context.signer_pending_commitment_update = true;
				}
				return Err(());
			}
		};
		Ok(msgs::CommitmentUpdate {
			update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
			commitment_signed,
		})
	}

	/// Gets the `Shutdown` message we should send our peer on reconnect, if any.
	pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
		if self.context.channel_state.is_local_shutdown_sent() {
			assert!(self.context.shutdown_scriptpubkey.is_some());
			Some(msgs::Shutdown {
				channel_id: self.context.channel_id,
				scriptpubkey: self.get_closing_scriptpubkey(),
			})
		} else { None }
	}

	/// May panic if some calls other than message-handling calls (which will all Err immediately)
	/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
	///
	/// Some links printed in log lines are included here to check them during build (when run with
	/// `cargo doc --document-private-items`):
	/// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
	/// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
	pub fn channel_reestablish<L: Deref, NS: Deref>(
		&mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
		chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
	) -> Result<ReestablishResponses, ChannelError>
	where
		L::Target: Logger,
		NS::Target: NodeSigner
	{
		if !self.context.channel_state.is_peer_disconnected() {
			// While BOLT 2 doesn't indicate explicitly we should error this channel here, it
			// almost certainly indicates we are going to end up out-of-sync in some way, so we
			// just close here instead of trying to recover.
			return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
		}

		if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
			msg.next_local_commitment_number == 0 {
			return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
		}

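		// Commitment numbers count down from INITIAL_COMMITMENT_NUMBER (2^48 - 1), while the
		// reestablish message carries count-up "next commitment number" values. The subtraction
		// below converts our countdown position into that count-up form, e.g. a
		// cur_holder_commitment_transaction_number of INITIAL_COMMITMENT_NUMBER - 1 maps to an
		// our_commitment_transaction of 0.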
		let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
		if msg.next_remote_commitment_number > 0 {
			let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
			let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
				.map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
			if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
				return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
			}
			if msg.next_remote_commitment_number > our_commitment_transaction {
				macro_rules! log_and_panic {
					($err_msg: expr) => {
						log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
						panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
					}
				}
				log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
					This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
					More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
					If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
					ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
					ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
					Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
					See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
			}
		}

		// Before we change the state of the channel, we check if the peer is sending a very old
		// commitment transaction number, if yes we send a warning message.
		if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
			return Err(ChannelError::Warn(format!(
				"Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
				msg.next_remote_commitment_number,
				our_commitment_transaction
			)));
		}

		// Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
		// remaining cases either succeed or ErrorMessage-fail).
		self.context.channel_state.clear_peer_disconnected();
		self.context.sent_message_awaiting_response = None;

		let shutdown_msg = self.get_outbound_shutdown();

		let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger);

		if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
			// If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
			if !self.context.channel_state.is_our_channel_ready() ||
					self.context.channel_state.is_monitor_update_in_progress() {
				if msg.next_remote_commitment_number != 0 {
					return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
				}
				// Short circuit the whole handler as there is nothing we can resend them
				return Ok(ReestablishResponses {
					channel_ready: None,
					raa: None, commitment_update: None,
					order: RAACommitmentOrder::CommitmentFirst,
					shutdown_msg, announcement_sigs,
				});
			}

			// We have OurChannelReady set!
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			return Ok(ReestablishResponses {
				channel_ready: Some(msgs::ChannelReady {
					channel_id: self.context.channel_id(),
					next_per_commitment_point,
					short_channel_id_alias: Some(self.context.outbound_scid_alias),
				}),
				raa: None, commitment_update: None,
				order: RAACommitmentOrder::CommitmentFirst,
				shutdown_msg, announcement_sigs,
			});
		}

		let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
			// Remote isn't waiting on any RevokeAndACK from us!
			// Note that if we need to repeat our ChannelReady we'll do that in the next if block.
			None
		} else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
			if self.context.channel_state.is_monitor_update_in_progress() {
				self.context.monitor_pending_revoke_and_ack = true;
				None
			} else {
				Some(self.get_last_revoke_and_ack())
			}
		} else {
			debug_assert!(false, "All values should have been handled in the four cases above");
			return Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
				msg.next_remote_commitment_number,
				our_commitment_transaction
			)));
		};

		// We increment cur_counterparty_commitment_transaction_number only upon receipt of
		// revoke_and_ack, not on sending commitment_signed, so we add one if we have
		// AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
		// the corresponding revoke_and_ack back yet.
		let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
		if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
			self.mark_awaiting_response();
		}
		let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };

		let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
			// We should never have to worry about MonitorUpdateInProgress resending ChannelReady
			let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
			Some(msgs::ChannelReady {
				channel_id: self.context.channel_id(),
				next_per_commitment_point,
				short_channel_id_alias: Some(self.context.outbound_scid_alias),
			})
		} else { None };

		if msg.next_local_commitment_number == next_counterparty_commitment_number {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
			} else {
				log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
			}

			Ok(ReestablishResponses {
				channel_ready, shutdown_msg, announcement_sigs,
				raa: required_revoke,
				commitment_update: None,
				order: self.context.resend_order.clone(),
			})
		} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
			if required_revoke.is_some() {
				log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
			} else {
				log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
			}

			if self.context.channel_state.is_monitor_update_in_progress() {
				self.context.monitor_pending_commitment_signed = true;
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					commitment_update: None, raa: None,
					order: self.context.resend_order.clone(),
				})
			} else {
				Ok(ReestablishResponses {
					channel_ready, shutdown_msg, announcement_sigs,
					raa: required_revoke,
					commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
					order: self.context.resend_order.clone(),
				})
			}
		} else if msg.next_local_commitment_number < next_counterparty_commitment_number {
			Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
				msg.next_local_commitment_number,
				next_counterparty_commitment_number,
			)))
		} else {
			Err(ChannelError::Close(format!(
				"Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
				msg.next_local_commitment_number,
				next_counterparty_commitment_number,
			)))
		}
	}

	/// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
	/// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
	/// at which point they will be recalculated.
	fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
		-> (u64, u64)
		where F::Target: FeeEstimator
	{
		if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }

		// Propose a range from our current Background feerate to our Normal feerate plus our
		// force_close_avoidance_max_fee_satoshis.
		// If we fail to come to consensus, we'll have to force-close.
		let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
		// Use NonAnchorChannelFee because this should be an estimate for a channel close
		// that we don't expect to need fee bumping
		let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
		let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };

		// The spec requires that (when the channel does not have anchors) we only send absolute
		// channel fees no greater than the absolute channel fee on the current commitment
		// transaction. It's unclear *which* commitment transaction this refers to, and there isn't
		// a very good reason to apply such a limit in any case. We don't bother doing so, risking
		// some force-closure by old nodes, but we wanted to close the channel anyway.

		if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
			let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
			proposed_feerate = cmp::max(proposed_feerate, min_feerate);
			proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
		}

		// Note that technically we could end up with a lower minimum fee if one side's balance is
		// below our dust limit, causing the output to disappear. We don't bother handling this
		// case, however, as this should only happen if a channel is closed before any (material)
		// payments have been made on it. This may cause slight fee overpayment and/or failure to
		// come to consensus with our counterparty on appropriate fees, however it should be a
		// relatively rare case. We can revisit this later, though note that in order to determine
		// if the funder's output is dust we have to know the absolute fee we're going to use.
		let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
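		// Fee arithmetic: feerates are in sats per 1000 weight units, so the absolute fee is
		// feerate * weight / 1000. E.g., with a hypothetical 1_000 sat/kWU proposed feerate and a
		// 700 WU closing transaction, the initial proposal below would be 700 sats.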
		let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
		let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
				// We always add force_close_avoidance_max_fee_satoshis to our normal
				// feerate-calculated fee, but allow the max to be overridden if we're using a
				// target feerate-calculated fee.
				cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
					proposed_max_feerate as u64 * tx_weight / 1000)
			} else {
				self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
			};

		self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
		self.context.closing_fee_limits.clone().unwrap()
	}

	/// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
	/// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
	/// this point if we're the funder we should send the initial closing_signed, and in any case
	/// shutdown should complete within a reasonable timeframe.
	fn closing_negotiation_ready(&self) -> bool {
		self.context.closing_negotiation_ready()
	}

	/// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
	/// an Err if no progress is being made and the channel should be force-closed instead.
	/// Should be called on a one-minute timer.
	pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
		if self.closing_negotiation_ready() {
			if self.context.closing_signed_in_flight {
				return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
			} else {
				self.context.closing_signed_in_flight = true;
			}
		}
		Ok(())
	}

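	/// Attempts to advance the `closing_signed` negotiation: when we are the funder and
	/// negotiation is ready, builds, signs, and returns our initial `closing_signed` proposal;
	/// when we are not the funder, replays any counterparty `closing_signed` we had deferred.
	/// Returns `(None, None, None)` whenever negotiation cannot make progress yet.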
	pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
		-> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator, L::Target: Logger
	{
		// If we're waiting on a monitor persistence, that implies we're also waiting to send some
		// message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
		// initiate `closing_signed` negotiation until we're clear of all pending messages. Note
		// that closing_negotiation_ready checks this case (as well as a few others).
		if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
			return Ok((None, None, None));
		}

		if !self.context.is_outbound() {
			if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
				return self.closing_signed(fee_estimator, &msg);
			}
			return Ok((None, None, None));
		}

		// If we're waiting on a counterparty `commitment_signed` to clear some updates from our
		// local commitment transaction, we can't yet initiate `closing_signed` negotiation.
		if self.context.expecting_peer_commitment_signed {
			return Ok((None, None, None));
		}

		let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);

		assert!(self.context.shutdown_scriptpubkey.is_some());
		let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
		log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
			our_min_fee, our_max_fee, total_fee_satoshis);

		match &self.context.holder_signer {
			ChannelSignerType::Ecdsa(ecdsa) => {
				let sig = ecdsa
					.sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
					.map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;

				self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
				Ok((Some(msgs::ClosingSigned {
					channel_id: self.context.channel_id,
					fee_satoshis: total_fee_satoshis,
					signature: sig,
					fee_range: Some(msgs::ClosingSignedFeeRange {
						min_fee_satoshis: our_min_fee,
						max_fee_satoshis: our_max_fee,
					}),
				}), None, None))
			},
			// TODO (taproot|arik)
			#[cfg(taproot)]
			_ => todo!()
		}
	}

	// Marks a channel as waiting for a response from the counterparty. If a response is not
	// received within [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] ticks after sending our own
	// message to them, we'll attempt a reconnection.
	fn mark_awaiting_response(&mut self) {
		self.context.sent_message_awaiting_response = Some(0);
	}

	/// Determines whether we should disconnect the counterparty due to not receiving a response
	/// within our expected timeframe.
	///
	/// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
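	///
	/// # Example
	///
	/// A sketch of the intended timer integration, not compiled as a doctest; `channel` is
	/// assumed to be in scope:
	///
	/// ```ignore
	/// // From the once-per-tick timer:
	/// if channel.should_disconnect_peer_awaiting_response() {
	///     // Disconnect (and later reconnect to) the peer so that the message the
	///     // counterparty failed to respond to is retransmitted.
	/// }
	/// ```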
	pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
		let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
			ticks_elapsed
		} else {
			// Don't disconnect when we're not waiting on a response.
			return false;
		};
		*ticks_elapsed += 1;
		*ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
	}

	pub fn shutdown(
		&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
	{
		if self.context.channel_state.is_peer_disconnected() {
			return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
		}
		if self.context.channel_state.is_pre_funded_state() {
			// Spec says we should fail the connection, not the channel, but that's nonsense, there
			// are plenty of reasons you may want to fail a channel pre-funding, and spec says you
			// can do that via error message without getting a connection fail anyway...
			return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
		}
		for htlc in self.context.pending_inbound_htlcs.iter() {
			if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
				return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
			}
		}
		assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));

		if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
			return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
		}

		if self.context.counterparty_shutdown_scriptpubkey.is_some() {
			if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
				return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
			}
		} else {
			self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
		}

		// If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
		// immediately after the commitment dance, but we can send a Shutdown because we won't send
		// any further commitment updates after we set LocalShutdownSent.
		let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();

		let update_shutdown_script = match self.context.shutdown_scriptpubkey {
			Some(_) => false,
			None => {
				assert!(send_shutdown);
				let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
					Ok(scriptpubkey) => scriptpubkey,
					Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
				};
				if !shutdown_scriptpubkey.is_compatible(their_features) {
					return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
				}
				self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
				true
			},
		};

		// From here on out, we may not fail!

		self.context.channel_state.set_remote_shutdown_sent();
		self.context.update_time_counter += 1;

		let monitor_update = if update_shutdown_script {
			self.context.latest_monitor_update_id += 1;
			let monitor_update = ChannelMonitorUpdate {
				update_id: self.context.latest_monitor_update_id,
				counterparty_node_id: Some(self.context.counterparty_node_id),
				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
					scriptpubkey: self.get_closing_scriptpubkey(),
				}],
				channel_id: Some(self.context.channel_id()),
			};
			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5949                         self.push_ret_blockable_mon_update(monitor_update)
5950                 } else { None };
5951                 let shutdown = if send_shutdown {
5952                         Some(msgs::Shutdown {
5953                                 channel_id: self.context.channel_id,
5954                                 scriptpubkey: self.get_closing_scriptpubkey(),
5955                         })
5956                 } else { None };
5957
5958                 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
5959                 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
5960                 // cell HTLCs and return them to fail the payment.
5961                 self.context.holding_cell_update_fee = None;
5962                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5963                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5964                         match htlc_update {
5965                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5966                                         dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5967                                         false
5968                                 },
5969                                 _ => true
5970                         }
5971                 });
5972
5973                 self.context.channel_state.set_local_shutdown_sent();
5974                 self.context.update_time_counter += 1;
5975
5976                 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
5977         }
5978
5979         fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
5980                 let mut tx = closing_tx.trust().built_transaction().clone();
5981
5982                 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
5983
5984                 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
5985                 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
5986                 let mut holder_sig = sig.serialize_der().to_vec();
5987                 holder_sig.push(EcdsaSighashType::All as u8);
5988                 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
5989                 cp_sig.push(EcdsaSighashType::All as u8);
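                     // BOLT 3 orders the two pubkeys in the funding redeemscript lexicographically,
                     // so the signatures on the witness stack must be pushed in that same order.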
5990                 if funding_key[..] < counterparty_funding_key[..] {
5991                         tx.input[0].witness.push(holder_sig);
5992                         tx.input[0].witness.push(cp_sig);
5993                 } else {
5994                         tx.input[0].witness.push(cp_sig);
5995                         tx.input[0].witness.push(holder_sig);
5996                 }
5997
5998                 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
5999                 tx
6000         }
6001
6002         pub fn closing_signed<F: Deref>(
6003                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
6004                 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
6005                 where F::Target: FeeEstimator
6006         {
6007                 if !self.context.channel_state.is_both_sides_shutdown() {
6008                         return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
6009                 }
6010                 if self.context.channel_state.is_peer_disconnected() {
6011                         return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
6012                 }
6013                 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
6014                         return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
6015                 }
6016                 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
6017                         return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
6018                 }
6019
6020                 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
6021                         return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
6022                 }
6023
6024                 if self.context.channel_state.is_monitor_update_in_progress() {
6025                         self.context.pending_counterparty_closing_signed = Some(msg.clone());
6026                         return Ok((None, None, None));
6027                 }
6028
6029                 let funding_redeemscript = self.context.get_funding_redeemscript();
6030                 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
6031                 if used_total_fee != msg.fee_satoshis {
6032                         return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
6033                 }
6034                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
6035
6036                 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
6037                         Ok(_) => {},
6038                         Err(_e) => {
6039                                 // The remote end may have decided to forgo their own output (to fees) due to
6040                                 // inconsistent dust limits, so check for that case by re-checking the signature here.
6041                                 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
6042                                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
6043                                 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
6044                         },
6045                 };
6046
6047                 for outp in closing_tx.trust().built_transaction().output.iter() {
6048                         if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
6049                                 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
6050                         }
6051                 }
6052
6053                 let closure_reason = if self.initiated_shutdown() {
6054                         ClosureReason::LocallyInitiatedCooperativeClosure
6055                 } else {
6056                         ClosureReason::CounterpartyInitiatedCooperativeClosure
6057                 };
6058
6059                 assert!(self.context.shutdown_scriptpubkey.is_some());
6060                 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
6061                         if last_fee == msg.fee_satoshis {
6062                                 let shutdown_result = ShutdownResult {
6063                                         closure_reason,
6064                                         monitor_update: None,
6065                                         dropped_outbound_htlcs: Vec::new(),
6066                                         unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
6067                                         channel_id: self.context.channel_id,
6068                                         user_channel_id: self.context.user_id,
6069                                         channel_capacity_satoshis: self.context.channel_value_satoshis,
6070                                         counterparty_node_id: self.context.counterparty_node_id,
6071                                         unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
6072                                         channel_funding_txo: self.context.get_funding_txo(),
6073                                 };
6074                                 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
6075                                 self.context.channel_state = ChannelState::ShutdownComplete;
6076                                 self.context.update_time_counter += 1;
6077                                 return Ok((None, Some(tx), Some(shutdown_result)));
6078                         }
6079                 }
6080
6081                 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
6082
6083                 macro_rules! propose_fee {
6084                         ($new_fee: expr) => {
6085                                 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
6086                                         (closing_tx, $new_fee)
6087                                 } else {
6088                                         self.build_closing_transaction($new_fee, false)
6089                                 };
6090
6091                                 return match &self.context.holder_signer {
6092                                         ChannelSignerType::Ecdsa(ecdsa) => {
6093                                                 let sig = ecdsa
6094                                                         .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
6095                                                         .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
6096                                                 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
6097                                                         let shutdown_result = ShutdownResult {
6098                                                                 closure_reason,
6099                                                                 monitor_update: None,
6100                                                                 dropped_outbound_htlcs: Vec::new(),
6101                                                                 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
6102                                                                 channel_id: self.context.channel_id,
6103                                                                 user_channel_id: self.context.user_id,
6104                                                                 channel_capacity_satoshis: self.context.channel_value_satoshis,
6105                                                                 counterparty_node_id: self.context.counterparty_node_id,
6106                                                                 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
6107                                                                 channel_funding_txo: self.context.get_funding_txo(),
6108                                                         };
6109                                                         self.context.channel_state = ChannelState::ShutdownComplete;
6110                                                         self.context.update_time_counter += 1;
6111                                                         let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
6112                                                         (Some(tx), Some(shutdown_result))
6113                                                 } else {
6114                                                         (None, None)
6115                                                 };
6116
6117                                                 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
6118                                                 Ok((Some(msgs::ClosingSigned {
6119                                                         channel_id: self.context.channel_id,
6120                                                         fee_satoshis: used_fee,
6121                                                         signature: sig,
6122                                                         fee_range: Some(msgs::ClosingSignedFeeRange {
6123                                                                 min_fee_satoshis: our_min_fee,
6124                                                                 max_fee_satoshis: our_max_fee,
6125                                                         }),
6126                                                 }), signed_tx, shutdown_result))
6127                                         },
6128                                         // TODO (taproot|arik)
6129                                         #[cfg(taproot)]
6130                                         _ => todo!()
6131                                 }
6132                         }
6133                 }
6134
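                     // If the peer sent a fee_range (the quick-close negotiation from BOLT 2), we can
                     // usually converge in a single round-trip by picking a fee inside the overlap of
                     // the two ranges; otherwise we fall back to the legacy alternating-proposal
                     // negotiation below.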
6135                 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
6136                         if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
6137                                 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
6138                         }
6139                         if max_fee_satoshis < our_min_fee {
6140                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
6141                         }
6142                         if min_fee_satoshis > our_max_fee {
6143                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
6144                         }
6145
6146                         if !self.context.is_outbound() {
6147                                 // They have to pay, so pick the highest fee in the overlapping range.
6148                                 // We should never set an upper bound aside from their full balance
6149                                 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
6150                                 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
6151                         } else {
6152                                 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
6153                                         return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
6154                                                 msg.fee_satoshis, our_min_fee, our_max_fee)));
6155                                 }
6156                                 // The proposed fee is in our acceptable range, accept it and broadcast!
6157                                 propose_fee!(msg.fee_satoshis);
6158                         }
6159                 } else {
6160                 // Old-style fee negotiation. We don't bother to enforce whether they are complying
6161                 // with the "making progress" requirements; we just comply and hope for the best.
6162                         if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
6163                                 if msg.fee_satoshis > last_fee {
6164                                         if msg.fee_satoshis < our_max_fee {
6165                                                 propose_fee!(msg.fee_satoshis);
6166                                         } else if last_fee < our_max_fee {
6167                                                 propose_fee!(our_max_fee);
6168                                         } else {
6169                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
6170                                         }
6171                                 } else {
6172                                         if msg.fee_satoshis > our_min_fee {
6173                                                 propose_fee!(msg.fee_satoshis);
6174                                         } else if last_fee > our_min_fee {
6175                                                 propose_fee!(our_min_fee);
6176                                         } else {
6177                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
6178                                         }
6179                                 }
6180                         } else {
6181                                 if msg.fee_satoshis < our_min_fee {
6182                                         propose_fee!(our_min_fee);
6183                                 } else if msg.fee_satoshis > our_max_fee {
6184                                         propose_fee!(our_max_fee);
6185                                 } else {
6186                                         propose_fee!(msg.fee_satoshis);
6187                                 }
6188                         }
6189                 }
6190         }
6191
6192         fn internal_htlc_satisfies_config(
6193                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
6194         ) -> Result<(), (&'static str, u16)> {
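                     // Worked example: with forwarding_fee_base_msat = 1_000 and
                     // forwarding_fee_proportional_millionths = 100, forwarding
                     // amt_to_forward = 1_000_000 msat requires
                     // htlc.amount_msat >= 1_000_000 + 1_000 + 1_000_000 * 100 / 1_000_000 = 1_001_100.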
6195                 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
6196                         .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
6197                 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
6198                         (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
6199                         return Err((
6200                                 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
6201                                 0x1000 | 12, // fee_insufficient
6202                         ));
6203                 }
6204                 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
6205                         return Err((
6206                                 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
6207                                 0x1000 | 13, // incorrect_cltv_expiry
6208                         ));
6209                 }
6210                 Ok(())
6211         }
6212
6213         /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
6214         /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
6215         /// unsuccessful, falls back to the previous one if one exists.
6216         pub fn htlc_satisfies_config(
6217                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
6218         ) -> Result<(), (&'static str, u16)> {
6219                 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
6220                         .or_else(|err| {
6221                                 if let Some(prev_config) = self.context.prev_config() {
6222                                         self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
6223                                 } else {
6224                                         Err(err)
6225                                 }
6226                         })
6227         }
6228
6229         pub fn can_accept_incoming_htlc<F: Deref, L: Deref>(
6230                 &self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L
6231         ) -> Result<(), (&'static str, u16)>
6232         where
6233                 F::Target: FeeEstimator,
6234                 L::Target: Logger
6235         {
6236                 if self.context.channel_state.is_local_shutdown_sent() {
6237                         return Err(("Shutdown was already sent", 0x4000|8))
6238                 }
6239
6240                 let dust_exposure_limiting_feerate = self.context.get_dust_exposure_limiting_feerate(&fee_estimator);
6241                 let htlc_stats = self.context.get_pending_htlc_stats(None, dust_exposure_limiting_feerate);
6242                 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(dust_exposure_limiting_feerate);
6243                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
6244                         (0, 0)
6245                 } else {
6246                         let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
6247                         (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
6248                                 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
6249                 };
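                     // HTLCs below (dust limit + HTLC-claim transaction fee) are trimmed: they never
                     // materialize as outputs on the commitment transaction and are instead burned to
                     // fees on a force-close, so they count towards our dust exposure.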
6250                 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
6251                 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
6252                         let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
6253                         if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
6254                                 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
6255                                         on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
6256                                 return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7))
6257                         }
6258                 } else {
6259                         let htlc_dust_exposure_msat =
6260                                 per_outbound_htlc_counterparty_commit_tx_fee_msat(self.context.feerate_per_kw, &self.context.channel_type);
6261                         let counterparty_tx_dust_exposure =
6262                                 htlc_stats.on_counterparty_tx_dust_exposure_msat.saturating_add(htlc_dust_exposure_msat);
6263                         if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
6264                                 log_info!(logger, "Cannot accept value that would put our exposure to tx fee dust at {} over the limit {} on counterparty commitment tx",
6265                                         counterparty_tx_dust_exposure, max_dust_htlc_exposure_msat);
6266                                 return Err(("Exceeded our tx fee dust exposure limit on counterparty commitment tx", 0x1000|7))
6267                         }
6268                 }
6269
6270                 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
6271                 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
6272                         let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
6273                         if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
6274                                 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
6275                                         on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
6276                                 return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7))
6277                         }
6278                 }
6279
6280                 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
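                             // Two anchor outputs (one per side, ANCHOR_OUTPUT_VALUE_SATOSHI each),
                             // converted from satoshis to msat.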
6281                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
6282                 } else {
6283                         0
6284                 };
6285
6286                 let mut removed_outbound_total_msat = 0;
6287                 for ref htlc in self.context.pending_outbound_htlcs.iter() {
6288                         if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
6289                                 removed_outbound_total_msat += htlc.amount_msat;
6290                         } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
6291                                 removed_outbound_total_msat += htlc.amount_msat;
6292                         }
6293                 }
6294
6295                 let pending_value_to_self_msat =
6296                         self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat;
6297                 let pending_remote_value_msat =
6298                         self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
6299
6300                 if !self.context.is_outbound() {
6301                         // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
6302                         // the spec because the fee spike buffer requirement doesn't exist on the receiver's
6303                         // side, only on the sender's. Note that with anchor outputs we are no longer as
6304                         // sensitive to fee spikes, so we skip the extra buffer multiple in that case below.
6305                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
6306                         let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
6307                         if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
6308                                 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
6309                         }
6310                         if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
6311                                 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
6312                                 return Err(("Fee spike buffer violation", 0x1000|7));
6313                         }
6314                 }
6315
6316                 Ok(())
6317         }
6318
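             // Note: per-commitment transaction numbers count *down* from INITIAL_COMMITMENT_NUMBER
             // (2^48 - 1), so the offsets added in the getters below reach *earlier* commitments
             // (i.e. higher numbers) than the one stored in the context.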
6319         pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
6320                 self.context.cur_holder_commitment_transaction_number + 1
6321         }
6322
6323         pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
6324                 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
6325         }
6326
6327         pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
6328                 self.context.cur_counterparty_commitment_transaction_number + 2
6329         }
6330
6331         #[cfg(test)]
6332         pub fn get_signer(&self) -> &ChannelSignerType<SP> {
6333                 &self.context.holder_signer
6334         }
6335
6336         #[cfg(test)]
6337         pub fn get_value_stat(&self) -> ChannelValueStat {
6338                 ChannelValueStat {
6339                         value_to_self_msat: self.context.value_to_self_msat,
6340                         channel_value_msat: self.context.channel_value_satoshis * 1000,
6341                         channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
6342                         pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
6343                         pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
6344                         holding_cell_outbound_amount_msat: {
6345                                 let mut res = 0;
6346                                 for h in self.context.holding_cell_htlc_updates.iter() {
6347                                         match h {
6348                                                 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
6349                                                         res += amount_msat;
6350                                                 }
6351                                                 _ => {}
6352                                         }
6353                                 }
6354                                 res
6355                         },
6356                         counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
6357                         counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
6358                 }
6359         }
6360
6361         /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
6362         /// Allowed in any state (including after shutdown)
6363         pub fn is_awaiting_monitor_update(&self) -> bool {
6364                 self.context.channel_state.is_monitor_update_in_progress()
6365         }
6366
6367         /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
6368         pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
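                     // Update IDs are assigned sequentially, so everything strictly before the first
                     // blocked update's ID has already been released to the user.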
6369                 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
6370                 self.context.blocked_monitor_updates[0].update.update_id - 1
6371         }
6372
6373         /// Returns the next blocked monitor update, if one exists, and a bool indicating whether
6374         /// a further blocked monitor update exists after the next.
6375         pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
6376                 if self.context.blocked_monitor_updates.is_empty() { return None; }
6377                 Some((self.context.blocked_monitor_updates.remove(0).update,
6378                         !self.context.blocked_monitor_updates.is_empty()))
6379         }
6380
6381         /// Pushes a new monitor update into our monitor update queue, returning it if it should be
6382         /// immediately given to the user for persisting or `None` if it should be held as blocked.
6383         fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
6384         -> Option<ChannelMonitorUpdate> {
6385                 let release_monitor = self.context.blocked_monitor_updates.is_empty();
6386                 if !release_monitor {
6387                         self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
6388                                 update,
6389                         });
6390                         None
6391                 } else {
6392                         Some(update)
6393                 }
6394         }
6395
6396         /// On startup, it's possible we detect some monitor updates have actually completed (and the
6397         /// ChannelManager was simply stale). In that case, we should simply drop them, which we do
6398         /// here after logging them.
6399         pub fn on_startup_drop_completed_blocked_mon_updates_through<L: Logger>(&mut self, logger: &L, loaded_mon_update_id: u64) {
6400                 let channel_id = self.context.channel_id();
6401                 self.context.blocked_monitor_updates.retain(|update| {
6402                         if update.update.update_id <= loaded_mon_update_id {
6403                                 log_info!(
6404                                         logger,
6405                                         "Dropping completed ChannelMonitorUpdate id {} on channel {} due to a stale ChannelManager",
6406                                         update.update.update_id,
6407                                         channel_id,
6408                                 );
6409                                 false
6410                         } else {
6411                                 true
6412                         }
6413                 });
6414         }
6415
6416         pub fn blocked_monitor_updates_pending(&self) -> usize {
6417                 self.context.blocked_monitor_updates.len()
6418         }
6419
6420         /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
6421         /// If the channel is outbound, this implies we have not yet broadcasted the funding
6422         /// transaction. If the channel is inbound, this implies simply that the channel has not
6423         /// advanced state.
6424         pub fn is_awaiting_initial_mon_persist(&self) -> bool {
6425                 if !self.is_awaiting_monitor_update() { return false; }
6426                 if matches!(
6427                         self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
6428                         if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
6429                 ) {
6430                         // If we're not a 0conf channel, we'll be waiting on a monitor update with only
6431                         // AwaitingChannelReady set, though our peer could have sent their channel_ready.
6432                         debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
6433                         return true;
6434                 }
6435                 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
6436                         self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
6437                         // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
6438                         // waiting for the initial monitor persistence. Thus, we check if our commitment
6439                         // transaction numbers have both been iterated only exactly once (for the
6440                         // funding_signed), and we're awaiting monitor update.
6441                         //
6442                         // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
6443                         // only way to get an awaiting-monitor-update state during initial funding is if the
6444                         // initial monitor persistence is still pending).
6445                         //
6446                         // Because deciding we're awaiting initial broadcast spuriously could result in
6447                         // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
6448                         // we hard-assert here, even in production builds.
6449                         if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
6450                         assert!(self.context.monitor_pending_channel_ready);
6451                         assert_eq!(self.context.latest_monitor_update_id, 0);
6452                         return true;
6453                 }
6454                 false
6455         }
6456
6457         /// Returns true if our channel_ready has been sent
6458         pub fn is_our_channel_ready(&self) -> bool {
6459                 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
6460                         matches!(self.context.channel_state, ChannelState::ChannelReady(_))
6461         }
6462
6463         /// Returns true if our peer has either initiated or agreed to shut down the channel.
6464         pub fn received_shutdown(&self) -> bool {
6465                 self.context.channel_state.is_remote_shutdown_sent()
6466         }
6467
6468         /// Returns true if we either initiated or agreed to shut down the channel.
6469         pub fn sent_shutdown(&self) -> bool {
6470                 self.context.channel_state.is_local_shutdown_sent()
6471         }
6472
6473         /// Returns true if we initiated shutting down the channel.
6474         pub fn initiated_shutdown(&self) -> bool {
6475                 self.context.local_initiated_shutdown.is_some()
6476         }
6477
6478         /// Returns true if this channel is fully shut down. True here implies that no further actions
6479         /// may/will be taken on this channel, and thus this object should be freed. Any future changes
6480         /// will be handled appropriately by the chain monitor.
6481         pub fn is_shutdown(&self) -> bool {
6482                 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
6483         }
6484
6485         pub fn channel_update_status(&self) -> ChannelUpdateStatus {
6486                 self.context.channel_update_status
6487         }
6488
6489         pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
6490                 self.context.update_time_counter += 1;
6491                 self.context.channel_update_status = status;
6492         }
6493
6494         fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
6495                 // Called:
6496                 //  * always when a new block/transactions are confirmed with the new height
6497                 //  * when funding is signed with a height of 0
6498                 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
6499                         return None;
6500                 }
6501
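                     // A transaction confirmed in the block at `height` has one confirmation, hence
                     // the +1; a non-positive result means the funding tx has been reorged out.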
6502                 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
6503                 if funding_tx_confirmations <= 0 {
6504                         self.context.funding_tx_confirmation_height = 0;
6505                 }
6506
6507                 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
6508                         return None;
6509                 }
6510
6511                 // If we're still pending the signature on a funding transaction, then we're not ready to send a
6512                 // channel_ready yet.
6513                 if self.context.signer_pending_funding {
6514                         return None;
6515                 }
6516
6517                 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
6518                 // channel_ready until the entire batch is ready.
6519                 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
6520                         self.context.channel_state.set_our_channel_ready();
6521                         true
6522                 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
6523                         self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
6524                         self.context.update_time_counter += 1;
6525                         true
6526                 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
6527                         // We got a reorg but not enough to trigger a force close, just ignore.
6528                         false
6529                 } else {
6530                         if self.context.funding_tx_confirmation_height != 0 &&
6531                                 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
6532                         {
6533                                 // We should never see a funding transaction on-chain until we've received
6534                                 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
6535                                 // an inbound channel - before that we have no known funding TXID). The fuzzer,
6536                                 // however, may do this and we shouldn't treat it as a bug.
6537                                 #[cfg(not(fuzzing))]
6538                                 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
6539                                         Do NOT broadcast a funding transaction manually - let LDK do it for you!",
6540                                         self.context.channel_state.to_u32());
6541                         }
6542                         // We got a reorg but not enough to trigger a force close, just ignore.
6543                         false
6544                 };
6545
6546                 if need_commitment_update {
6547                         if !self.context.channel_state.is_monitor_update_in_progress() {
6548                                 if !self.context.channel_state.is_peer_disconnected() {
6549                                         let next_per_commitment_point =
6550                                                 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
6551                                         return Some(msgs::ChannelReady {
6552                                                 channel_id: self.context.channel_id,
6553                                                 next_per_commitment_point,
6554                                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
6555                                         });
6556                                 }
6557                         } else {
6558                                 self.context.monitor_pending_channel_ready = true;
6559                         }
6560                 }
6561                 None
6562         }
6563
6564         /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
6565         /// In the first case, we store the confirmation height and calculate the short channel id.
6566         /// In the second, we simply return an Err indicating we need to be force-closed now.
6567         pub fn transactions_confirmed<NS: Deref, L: Deref>(
6568                 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
6569                 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
6570         ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6571         where
6572                 NS::Target: NodeSigner,
6573                 L::Target: Logger
6574         {
6575                 let mut msgs = (None, None);
6576                 if let Some(funding_txo) = self.context.get_funding_txo() {
6577                         for &(index_in_block, tx) in txdata.iter() {
6578                                 // Check if the transaction is the expected funding transaction, and if it is,
6579                                 // check that it pays the right amount to the right script.
6580                                 if self.context.funding_tx_confirmation_height == 0 {
6581                                         if tx.txid() == funding_txo.txid {
6582                                                 let txo_idx = funding_txo.index as usize;
6583                                                 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
6584                                                                 tx.output[txo_idx].value != self.context.channel_value_satoshis {
6585                                                         if self.context.is_outbound() {
6586                                                                 // If we generated the funding transaction and it doesn't match what it
6587                                                                 // should, the client is really broken and we should just panic and
6588                                                                 // tell them off. That said, because hash collisions happen with high
6589                                                                 // probability in fuzzing mode, if we're fuzzing we just close the
6590                                                                 // channel and move on.
6591                                                                 #[cfg(not(fuzzing))]
6592                                                                 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6593                                                         }
6594                                                         self.context.update_time_counter += 1;
6595                                                         let err_reason = "funding tx had wrong script/value or output index";
6596                                                         return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
6597                                                 } else {
6598                                                         if self.context.is_outbound() {
6599                                                                 if !tx.is_coin_base() {
6600                                                                         for input in tx.input.iter() {
6601                                                                                 if input.witness.is_empty() {
6602                                                                                         // We generated a malleable funding transaction, implying we've
6603                                                                                         // just exposed ourselves to funds loss to our counterparty.
6604                                                                                         #[cfg(not(fuzzing))]
6605                                                                                         panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6606                                                                                 }
6607                                                                         }
6608                                                                 }
6609                                                         }
6610                                                         self.context.funding_tx_confirmation_height = height;
6611                                                         self.context.funding_tx_confirmed_in = Some(*block_hash);
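                                                             // Per BOLT 7, the short channel id packs the block height,
                                                             // the transaction's index within the block and the funding
                                                             // output index into a single u64.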
6612                                                         self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
6613                                                                 Ok(scid) => Some(scid),
6614                                                                 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
6615                                                         }
6616                                                 }
6617                                                 // If this is a coinbase transaction and not a 0-conf channel,
6618                                                 // we should update our minimum_depth to 100 to handle coinbase maturity.
6619                                                 if tx.is_coin_base() &&
6620                                                         self.context.minimum_depth.unwrap_or(0) > 0 &&
6621                                                         self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6622                                                         self.context.minimum_depth = Some(COINBASE_MATURITY);
6623                                                 }
6624                                         }
6625                                         // If we allow 1-conf funding, we may need to check for channel_ready here and
6626                                         // send it immediately instead of waiting for a best_block_updated call (which
6627                                         // may have already happened for this block).
6628                                         if let Some(channel_ready) = self.check_get_channel_ready(height) {
6629                                                 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6630                                                 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
6631                                                 msgs = (Some(channel_ready), announcement_sigs);
6632                                         }
6633                                 }
6634                                 for inp in tx.input.iter() {
6635                                         if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
6636                                                 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
6637                                                 return Err(ClosureReason::CommitmentTxConfirmed);
6638                                         }
6639                                 }
6640                         }
6641                 }
6642                 Ok(msgs)
6643         }
6644
6645         /// When a new block is connected, we check the height of the block against outbound holding
6646         /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
6647         /// else (e.g. commitment transaction broadcasting, HTLC transaction broadcasting, etc.) is
6648         /// handled by the ChannelMonitor.
6649         ///
6650         /// If we return Err, the channel may have been closed, at which point the standard
6651         /// requirements apply - no calls may be made except those explicitly stated to be allowed
6652         /// post-shutdown.
6653         ///
6654         /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
6655         /// back.
6656         pub fn best_block_updated<NS: Deref, L: Deref>(
6657                 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
6658                 node_signer: &NS, user_config: &UserConfig, logger: &L
6659         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6660         where
6661                 NS::Target: NodeSigner,
6662                 L::Target: Logger
6663         {
6664                 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
6665         }
6666
6667         fn do_best_block_updated<NS: Deref, L: Deref>(
6668                 &mut self, height: u32, highest_header_time: u32,
6669                 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
6670         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6671         where
6672                 NS::Target: NodeSigner,
6673                 L::Target: Logger
6674         {
6675                 let mut timed_out_htlcs = Vec::new();
6676                 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
6677                 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
6678                 // ~now.
6679                 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
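		// As a worked example (assuming LATENCY_GRACE_PERIOD_BLOCKS is 3): at height 800_000,
		// any holding-cell HTLC with cltv_expiry <= 800_003 is dropped and failed back below.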
6680                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6681                         match htlc_update {
6682                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
6683                                         if *cltv_expiry <= unforwarded_htlc_cltv_limit {
6684                                                 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
6685                                                 false
6686                                         } else { true }
6687                                 },
6688                                 _ => true
6689                         }
6690                 });
6691
6692                 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
6693
6694                 if let Some(channel_ready) = self.check_get_channel_ready(height) {
6695                         let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6696                                 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6697                         } else { None };
6698                         log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6699                         return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
6700                 }
6701
6702                 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6703                         self.context.channel_state.is_our_channel_ready() {
6704                         let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
6705                         if self.context.funding_tx_confirmation_height == 0 {
6706                                 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
6707                                 // zero if it has been reorged out; in either case, our state flags
6708                                 // indicate we've already sent a channel_ready.
6709                                 funding_tx_confirmations = 0;
6710                         }
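			// (Sanity of the math above: a funding tx confirmed at the current height has
			// exactly 1 confirmation, while a disconnected confirming block drives the
			// count to zero or below.)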
6711
6712                         // If we've sent channel_ready (or have both sent and received channel_ready), and
6713                         // the funding transaction has become unconfirmed,
6714                         // close the channel and hope we can get the latest state on chain (because presumably
6715                         // the funding transaction is at least still in the mempool of most nodes).
6716                         //
6717                         // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
6718                         // 0-conf channel, but not doing so may lead to the
6719                         // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
6720                         // to.
6721                         if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
6722                                 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
6723                                         self.context.minimum_depth.unwrap(), funding_tx_confirmations);
6724                                 return Err(ClosureReason::ProcessingError { err: err_reason });
6725                         }
6726                 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
6727                                 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
6728                         log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
6729                         // If funding_tx_confirmed_in is unset, the channel must not be active
6730                         assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
6731                         assert!(!self.context.channel_state.is_our_channel_ready());
6732                         return Err(ClosureReason::FundingTimedOut);
6733                 }
6734
6735                 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6736                         self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6737                 } else { None };
6738                 Ok((None, timed_out_htlcs, announcement_sigs))
6739         }
6740
6741         /// Indicates the funding transaction is no longer confirmed in the main chain. This may
6742         /// force-close the channel, but may also indicate a harmless reorganization of a block or two
6743         /// before the channel has reached channel_ready and we can just wait for more blocks.
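	///
	/// A rough calling sketch (not compiled; illustrative only):
	///
	/// ```ignore
	/// if let Err(closure_reason) = channel.funding_transaction_unconfirmed(&logger) {
	/// 	// The channel had already fully opened; force-close it with closure_reason.
	/// }
	/// ```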
6744         pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
6745                 if self.context.funding_tx_confirmation_height != 0 {
6746                         // We handle the funding disconnection by calling best_block_updated with a height one
6747                         // below where our funding was connected, implying a reorg back to conf_height - 1.
6748                         let reorg_height = self.context.funding_tx_confirmation_height - 1;
6749                 // We use the time field to bump the current time we set on channel updates if it's
6750                         // larger. If we don't know that time has moved forward, we can just set it to the last
6751                         // time we saw and it will be ignored.
6752                         let best_time = self.context.update_time_counter;
6753                         match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
6754                                 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
6755                                         assert!(channel_ready.is_none(), "We can't generate a funding with 0 confirmations?");
6756                                         assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
6757                                         assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
6758                                         Ok(())
6759                                 },
6760                                 Err(e) => Err(e)
6761                         }
6762                 } else {
6763                         // We never learned about the funding confirmation anyway, just ignore
6764                         Ok(())
6765                 }
6766         }
6767
6768         // Methods to get unprompted messages to send to the remote end (or where we already returned
6769         // something in the handler for the message that prompted this message):
6770
6771         /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
6772         /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
6773         /// directions). Should be used for both broadcasted announcements and in response to an
6774         /// AnnouncementSignatures message from the remote peer.
6775         ///
6776         /// Will only fail if we're not in a state where channel_announcement may be sent (including
6777         /// closing).
6778         ///
6779         /// This will only return ChannelError::Ignore upon failure.
6780         ///
6781         /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
6782         fn get_channel_announcement<NS: Deref>(
6783                 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6784         ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6785                 if !self.context.config.announced_channel {
6786                         return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
6787                 }
6788                 if !self.context.is_usable() {
6789                         return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
6790                 }
6791
6792                 let short_channel_id = self.context.get_short_channel_id()
6793                         .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
6794                 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6795                         .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
6796                 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
6797                 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
6798
6799                 let msg = msgs::UnsignedChannelAnnouncement {
6800                         features: channelmanager::provided_channel_features(&user_config),
6801                         chain_hash,
6802                         short_channel_id,
6803                         node_id_1: if were_node_one { node_id } else { counterparty_node_id },
6804                         node_id_2: if were_node_one { counterparty_node_id } else { node_id },
6805                         bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
6806                         bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
6807                         excess_data: Vec::new(),
6808                 };
6809
6810                 Ok(msg)
6811         }
6812
6813         fn get_announcement_sigs<NS: Deref, L: Deref>(
6814                 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6815                 best_block_height: u32, logger: &L
6816         ) -> Option<msgs::AnnouncementSignatures>
6817         where
6818                 NS::Target: NodeSigner,
6819                 L::Target: Logger
6820         {
6821                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6822                         return None;
6823                 }
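		// (The check above enforces the BOLT 7 requirement of at least six confirmations
		// before announcing: confirmation_height + 5 <= best_block_height implies
		// best_block_height - confirmation_height + 1 >= 6.)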
6824
6825                 if !self.context.is_usable() {
6826                         return None;
6827                 }
6828
6829                 if self.context.channel_state.is_peer_disconnected() {
6830                         log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
6831                         return None;
6832                 }
6833
6834                 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
6835                         return None;
6836                 }
6837
6838                 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
6839                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6840                         Ok(a) => a,
6841                         Err(e) => {
6842                                 log_trace!(logger, "{:?}", e);
6843                                 return None;
6844                         }
6845                 };
6846                 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
6847                         Err(_) => {
6848                                 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
6849                                 return None;
6850                         },
6851                         Ok(v) => v
6852                 };
6853                 match &self.context.holder_signer {
6854                         ChannelSignerType::Ecdsa(ecdsa) => {
6855                                 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
6856                                         Err(_) => {
6857                                                 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
6858                                                 return None;
6859                                         },
6860                                         Ok(v) => v
6861                                 };
6862                                 let short_channel_id = match self.context.get_short_channel_id() {
6863                                         Some(scid) => scid,
6864                                         None => return None,
6865                                 };
6866
6867                                 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
6868
6869                                 Some(msgs::AnnouncementSignatures {
6870                                         channel_id: self.context.channel_id(),
6871                                         short_channel_id,
6872                                         node_signature: our_node_sig,
6873                                         bitcoin_signature: our_bitcoin_sig,
6874                                 })
6875                         },
6876                         // TODO (taproot|arik)
6877                         #[cfg(taproot)]
6878                         _ => todo!()
6879                 }
6880         }
6881
6882         /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
6883         /// available.
6884         fn sign_channel_announcement<NS: Deref>(
6885                 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
6886         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6887                 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
6888                         let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6889                                 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
6890                         let were_node_one = announcement.node_id_1 == our_node_key;
6891
6892                         let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
6893                                 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
6894                         match &self.context.holder_signer {
6895                                 ChannelSignerType::Ecdsa(ecdsa) => {
6896                                         let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
6897                                                 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
6898                                         Ok(msgs::ChannelAnnouncement {
6899                                                 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
6900                                                 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
6901                                                 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
6902                                                 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
6903                                                 contents: announcement,
6904                                         })
6905                                 },
6906                                 // TODO (taproot|arik)
6907                                 #[cfg(taproot)]
6908                                 _ => todo!()
6909                         }
6910                 } else {
6911                         Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
6912                 }
6913         }
6914
6915         /// Processes an incoming announcement_signatures message, providing a fully-signed
6916         /// channel_announcement message which we can broadcast and storing our counterparty's
6917         /// signatures for later reconstruction/rebroadcast of the channel_announcement.
6918         pub fn announcement_signatures<NS: Deref>(
6919                 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
6920                 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
6921         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6922                 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
6923
6924                 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
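		// Per BOLT 7, both signatures in announcement_signatures commit to the double-SHA256
		// of the serialized announcement contents; we verify them below against the
		// counterparty's node key and funding key respectively.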
6925
6926                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
6927                         return Err(ChannelError::Close(format!(
6928                                 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
6929                                  &announcement, self.context.get_counterparty_node_id())));
6930                 }
6931                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
6932                         return Err(ChannelError::Close(format!(
6933                                 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
6934                                 &announcement, self.context.counterparty_funding_pubkey())));
6935                 }
6936
6937                 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
6938                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6939                         return Err(ChannelError::Ignore(
6940                                 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
6941                 }
6942
6943                 self.sign_channel_announcement(node_signer, announcement)
6944         }
6945
6946         /// Gets a signed channel_announcement for this channel, if we previously received an
6947         /// announcement_signatures from our counterparty.
6948         pub fn get_signed_channel_announcement<NS: Deref>(
6949                 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
6950         ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
6951                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6952                         return None;
6953                 }
6954                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6955                         Ok(res) => res,
6956                         Err(_) => return None,
6957                 };
6958                 match self.sign_channel_announcement(node_signer, announcement) {
6959                         Ok(res) => Some(res),
6960                         Err(_) => None,
6961                 }
6962         }
6963
6964         /// May panic if called on a channel that wasn't immediately-previously
6965         /// self.remove_uncommitted_htlcs_and_mark_paused()'d
6966         pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
6967                 assert!(self.context.channel_state.is_peer_disconnected());
6968                 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
6969                 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
6970                 // current to_remote balances. However, it no longer has any use, and thus is now simply
6971                 // set to a dummy (but valid, as required by the spec) public key.
6972                 // Fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
6973                 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
6974                 // valid, and valid in fuzzing mode's arbitrary validity criteria:
6975                 let mut pk = [2; 33]; pk[1] = 0xff;
6976                 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
6977                 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
6978                         let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
6979                         log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
6980                         remote_last_secret
6981                 } else {
6982                         log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
6983                         [0;32]
6984                 };
6985                 self.mark_awaiting_response();
6986                 msgs::ChannelReestablish {
6987                         channel_id: self.context.channel_id(),
6988                         // The protocol has two different commitment number concepts - the "commitment
6989                         // transaction number", which starts from 0 and counts up, and the "revocation key
6990                         // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
6991                         // commitment transaction numbers by the index which will be used to reveal the
6992                         // revocation key for that commitment transaction, which means we have to convert them
6993                         // to protocol-level commitment numbers here...
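			// (Illustrative example: with INITIAL_COMMITMENT_NUMBER = 2^48 - 1, a holder counter
			// of 2^48 - 3 means two commitment transactions have been exchanged, so the
			// protocol-level next_local_commitment_number below comes out to 2.)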
6994
6995                         // next_local_commitment_number is the next commitment_signed number we expect to
6996                         // receive (indicating if they need to resend one that we missed).
6997                         next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
6998                         // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
6999                         // receive, however we track it by the next commitment number for a remote transaction
7000                 // (which is one further, as they always revoke the previous commitment transaction, not
7001                         // the one we send) so we have to decrement by 1. Note that if
7002                         // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
7003                         // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
7004                         // overflow here.
7005                         next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
7006                         your_last_per_commitment_secret: remote_last_secret,
7007                         my_current_per_commitment_point: dummy_pubkey,
7008                 // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
7009                         // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
7010                         // txid of that interactive transaction, else we MUST NOT set it.
7011                         next_funding_txid: None,
7012                 }
7013         }
7014
7015
7016         // Send stuff to our remote peers:
7017
7018         /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
7019         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
7020         /// commitment update.
7021         ///
7022         /// `Err`s will only be [`ChannelError::Ignore`].
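	///
	/// A rough calling sketch (not compiled; `channel` and the other names in scope are
	/// illustrative only):
	///
	/// ```ignore
	/// channel.queue_add_htlc(amt_msat, payment_hash, cltv_expiry, source, onion_packet,
	/// 	None /* skimmed_fee_msat */, None /* blinding_point */, &fee_estimator, &logger)?;
	/// // Later, free the holding cell to actually build and send the commitment update.
	/// ```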
7023         pub fn queue_add_htlc<F: Deref, L: Deref>(
7024                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
7025                 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
7026                 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
7027         ) -> Result<(), ChannelError>
7028         where F::Target: FeeEstimator, L::Target: Logger
7029         {
7030                 self
7031                         .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
7032                                 skimmed_fee_msat, blinding_point, fee_estimator, logger)
7033                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
7034                         .map_err(|err| {
7035                                 if let ChannelError::Ignore(_) = err { /* fine */ }
7036                                 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
7037                                 err
7038                         })
7039         }
7040
7041         /// Adds a pending outbound HTLC to this channel. Note that you probably want
7042         /// [`Self::send_htlc_and_commit`] instead, as you'll usually want both messages at once.
7043         ///
7044         /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
7045         /// the wire:
7046         /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
7047         ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
7048         ///   awaiting ACK.
7049         /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
7050         ///   we may not yet have sent the previous commitment update messages and will need to
7051         ///   regenerate them.
7052         ///
7053         /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
7054         /// on this [`Channel`] if `force_holding_cell` is false.
7055         ///
7056         /// `Err`s will only be [`ChannelError::Ignore`].
7057         fn send_htlc<F: Deref, L: Deref>(
7058                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
7059                 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
7060                 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
7061                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
7062         ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
7063         where F::Target: FeeEstimator, L::Target: Logger
7064         {
7065                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
7066                         self.context.channel_state.is_local_shutdown_sent() ||
7067                         self.context.channel_state.is_remote_shutdown_sent()
7068                 {
7069                         return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
7070                 }
7071                 let channel_total_msat = self.context.channel_value_satoshis * 1000;
7072                 if amount_msat > channel_total_msat {
7073                         return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
7074                 }
7075
7076                 if amount_msat == 0 {
7077                         return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
7078                 }
7079
7080                 let available_balances = self.context.get_available_balances(fee_estimator);
7081                 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
7082                         return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
7083                                 available_balances.next_outbound_htlc_minimum_msat)));
7084                 }
7085
7086                 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
7087                         return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
7088                                 available_balances.next_outbound_htlc_limit_msat)));
7089                 }
7090
7091                 if self.context.channel_state.is_peer_disconnected() {
7092                 // Note that this should never really happen: if we're !is_live(), receipt of an
7093                 // incoming HTLC for relay will result in us rejecting the HTLC, and we won't allow
7094                 // the user to send directly into a !is_live() channel. However, if we
7095                         // disconnected during the time the previous hop was doing the commitment dance we may
7096                         // end up getting here after the forwarding delay. In any case, returning an
7097                         // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
7098                         return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
7099                 }
7100
7101                 let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
7102                 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
7103                         payment_hash, amount_msat,
7104                         if force_holding_cell { "into holding cell" }
7105                         else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
7106                         else { "to peer" });
7107
7108                 if need_holding_cell {
7109                         force_holding_cell = true;
7110                 }
7111
7112                 // Now update local state:
7113                 if force_holding_cell {
7114                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
7115                                 amount_msat,
7116                                 payment_hash,
7117                                 cltv_expiry,
7118                                 source,
7119                                 onion_routing_packet,
7120                                 skimmed_fee_msat,
7121                                 blinding_point,
7122                         });
7123                         return Ok(None);
7124                 }
7125
7126                 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
7127                         htlc_id: self.context.next_holder_htlc_id,
7128                         amount_msat,
7129                         payment_hash: payment_hash.clone(),
7130                         cltv_expiry,
7131                         state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
7132                         source,
7133                         blinding_point,
7134                         skimmed_fee_msat,
7135                 });
7136
7137                 let res = msgs::UpdateAddHTLC {
7138                         channel_id: self.context.channel_id,
7139                         htlc_id: self.context.next_holder_htlc_id,
7140                         amount_msat,
7141                         payment_hash,
7142                         cltv_expiry,
7143                         onion_routing_packet,
7144                         skimmed_fee_msat,
7145                         blinding_point,
7146                 };
7147                 self.context.next_holder_htlc_id += 1;
7148
7149                 Ok(Some(res))
7150         }
7151
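	// Informal sketch of the HTLC state promotions performed when we send a new
	// commitment_signed (see build_commitment_no_status_check below):
	//   inbound:  AwaitingRemoteRevokeToAnnounce -> AwaitingAnnouncedRemoteRevoke
	//   outbound: AwaitingRemoteRevokeToRemove   -> AwaitingRemovedRemoteRevoke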
7152         fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
7153                 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
7154                 // We can upgrade the status of some HTLCs that are waiting on a commitment: even if we
7155                 // fail to generate it, we are still at least at a position where upgrading their status
7156                 // is acceptable.
7157                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
7158                         let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
7159                                 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
7160                         } else { None };
7161                         if let Some(state) = new_state {
7162                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
7163                                 htlc.state = state;
7164                         }
7165                 }
7166                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
7167                         if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
7168                                 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
7169                                 // Grab the preimage, if it exists, instead of cloning
7170                                 let mut reason = OutboundHTLCOutcome::Success(None);
7171                                 mem::swap(outcome, &mut reason);
7172                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
7173                         }
7174                 }
7175                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
7176                         if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
7177                                 debug_assert!(!self.context.is_outbound());
7178                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
7179                                 self.context.feerate_per_kw = feerate;
7180                                 self.context.pending_update_fee = None;
7181                         }
7182                 }
7183                 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
7184
7185                 let (mut htlcs_ref, counterparty_commitment_tx) =
7186                         self.build_commitment_no_state_update(logger);
7187                 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
7188                 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
7189                         htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
7190
7191                 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
7192                         self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
7193                 }
7194
7195                 self.context.latest_monitor_update_id += 1;
7196                 let monitor_update = ChannelMonitorUpdate {
7197                         update_id: self.context.latest_monitor_update_id,
7198                         counterparty_node_id: Some(self.context.counterparty_node_id),
7199                         updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
7200                                 commitment_txid: counterparty_commitment_txid,
7201                                 htlc_outputs: htlcs.clone(),
7202                                 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
7203                                 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
7204                                 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
7205                                 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
7206                                 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
7207                         }],
7208                         channel_id: Some(self.context.channel_id()),
7209                 };
7210                 self.context.channel_state.set_awaiting_remote_revoke();
7211                 monitor_update
7212         }
7213
7214         fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
7215         -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
7216         where L::Target: Logger
7217         {
7218                 let counterparty_keys = self.context.build_remote_transaction_keys();
7219                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
7220                 let counterparty_commitment_tx = commitment_stats.tx;
7221
7222                 #[cfg(any(test, fuzzing))]
7223                 {
7224                         if !self.context.is_outbound() {
7225                                 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
7226                                 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
7227                                 if let Some(info) = projected_commit_tx_info {
7228                                         let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
7229                                         if info.total_pending_htlcs == total_pending_htlcs
7230                                                 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
7231                                                 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
7232                                                 && info.feerate == self.context.feerate_per_kw {
7233                                                         let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
7234                                                         assert_eq!(actual_fee, info.fee);
7235                                                 }
7236                                 }
7237                         }
7238                 }
7239
7240                 (commitment_stats.htlcs_included, counterparty_commitment_tx)
7241         }
7242
7243         /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
7244         /// generation when we shouldn't change HTLC/channel state.
7245         fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
7246                 // Get the fee tests from `build_commitment_no_state_update`
7247                 #[cfg(any(test, fuzzing))]
7248                 self.build_commitment_no_state_update(logger);
7249
7250                 let counterparty_keys = self.context.build_remote_transaction_keys();
7251                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
7252                 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
7253
7254                 match &self.context.holder_signer {
7255                         ChannelSignerType::Ecdsa(ecdsa) => {
7256                                 let (signature, htlc_signatures);
7257
7258                                 {
7259                                         let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
7260                                         for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
7261                                                 htlcs.push(htlc);
7262                                         }
7263
7264                                         let res = ecdsa.sign_counterparty_commitment(
7265                                                         &commitment_stats.tx,
7266                                                         commitment_stats.inbound_htlc_preimages,
7267                                                         commitment_stats.outbound_htlc_preimages,
7268                                                         &self.context.secp_ctx,
7269                                                 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
7270                                         signature = res.0;
7271                                         htlc_signatures = res.1;
7272
7273                                         log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
7274                                                 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
7275                                                 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
7276                                                 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
7277
7278                                         for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
7279                                                 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
7280                                                         encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
7281                                                         encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
7282                                                         log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
7283                                                         log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
7284                                         }
7285                                 }
7286
7287                                 Ok((msgs::CommitmentSigned {
7288                                         channel_id: self.context.channel_id,
7289                                         signature,
7290                                         htlc_signatures,
7291                                         #[cfg(taproot)]
7292                                         partial_signature_with_nonce: None,
7293                                 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
7294                         },
7295                         // TODO (taproot|arik)
7296                         #[cfg(taproot)]
7297                         _ => todo!()
7298                 }
7299         }
7300
7301         /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
7302         /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
7303         ///
7304         /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
7305         /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
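	///
	/// A rough calling sketch (not compiled; illustrative only):
	///
	/// ```ignore
	/// match channel.send_htlc_and_commit(amt_msat, payment_hash, cltv_expiry, source,
	/// 	onion_packet, None /* skimmed_fee_msat */, &fee_estimator, &logger)? {
	/// 	Some(monitor_update) => { /* persist the update before releasing messages */ },
	/// 	None => { /* the HTLC went into the holding cell; nothing to send yet */ },
	/// }
	/// ```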
7306         pub fn send_htlc_and_commit<F: Deref, L: Deref>(
7307                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
7308                 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
7309                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
7310         ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
7311         where F::Target: FeeEstimator, L::Target: Logger
7312         {
7313                 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
7314                         onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
7315                 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
7316                 match send_res? {
7317                         Some(_) => {
7318                                 let monitor_update = self.build_commitment_no_status_check(logger);
7319                                 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
7320                                 Ok(self.push_ret_blockable_mon_update(monitor_update))
7321                         },
7322                         None => Ok(None)
7323                 }
7324         }
7325
7326         /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
7327         /// happened.
7328         pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
7329                 let new_forwarding_info = Some(CounterpartyForwardingInfo {
7330                         fee_base_msat: msg.contents.fee_base_msat,
7331                         fee_proportional_millionths: msg.contents.fee_proportional_millionths,
7332                         cltv_expiry_delta: msg.contents.cltv_expiry_delta
7333                 });
7334                 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
7335                 if did_change {
7336                         self.context.counterparty_forwarding_info = new_forwarding_info;
7337                 }
7338
7339                 Ok(did_change)
7340         }
7341
7342         /// Begins the shutdown process, getting a message for the remote peer and returning all
7343         /// holding cell HTLCs for payment failure.
7344         pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
7345                 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
7346         -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
7347         {
7348                 for htlc in self.context.pending_outbound_htlcs.iter() {
7349                         if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
7350                                 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
7351                         }
7352                 }
7353                 if self.context.channel_state.is_local_shutdown_sent() {
7354                         return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
7355                 }
7356                 else if self.context.channel_state.is_remote_shutdown_sent() {
7357                         return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
7358                 }
7359                 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
7360                         return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
7361                 }
7362                 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
7363                 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
7364                         return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
7365                 }
7366
7367                 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
7368                         Some(_) => false,
7369                         None => {
7370                                 // use override shutdown script if provided
7371                                 let shutdown_scriptpubkey = match override_shutdown_script {
7372                                         Some(script) => script,
7373                                         None => {
7374                                                 // otherwise, use the shutdown scriptpubkey provided by the signer
7375                                                 match signer_provider.get_shutdown_scriptpubkey() {
7376                                                         Ok(scriptpubkey) => scriptpubkey,
7377                                                         Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
7378                                                 }
7379                                         },
7380                                 };
7381                                 if !shutdown_scriptpubkey.is_compatible(their_features) {
7382                                         return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
7383                                 }
7384                                 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
7385                                 true
7386                         },
7387                 };
7388
7389                 // From here on out, we may not fail!
7390                 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
7391                 self.context.channel_state.set_local_shutdown_sent();
7392                 self.context.local_initiated_shutdown = Some(());
7393                 self.context.update_time_counter += 1;
7394
7395                 let monitor_update = if update_shutdown_script {
7396                         self.context.latest_monitor_update_id += 1;
7397                         let monitor_update = ChannelMonitorUpdate {
7398                                 update_id: self.context.latest_monitor_update_id,
7399                                 counterparty_node_id: Some(self.context.counterparty_node_id),
7400                                 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
7401                                         scriptpubkey: self.get_closing_scriptpubkey(),
7402                                 }],
7403                                 channel_id: Some(self.context.channel_id()),
7404                         };
7405                         self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
7406                         self.push_ret_blockable_mon_update(monitor_update)
7407                 } else { None };
7408                 let shutdown = msgs::Shutdown {
7409                         channel_id: self.context.channel_id,
7410                         scriptpubkey: self.get_closing_scriptpubkey(),
7411                 };
7412
7413                 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
7414                 // our shutdown until we've committed all of the pending changes.
7415                 self.context.holding_cell_update_fee = None;
7416                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
7417                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
7418                         match htlc_update {
7419                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
7420                                         dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
7421                                         false
7422                                 },
7423                                 _ => true
7424                         }
7425                 });
7426
7427                 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
7428                         "we can't both complete shutdown and return a monitor update");
7429
7430                 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
7431         }
7432
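	/// Returns the source and payment hash of every HTLC currently in flight on this channel,
	/// covering both holding-cell additions and HTLCs already pending outbound.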
7433         pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
7434                 self.context.holding_cell_htlc_updates.iter()
7435                         .flat_map(|htlc_update| {
7436                                 match htlc_update {
7437                                         HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
7438                                                 => Some((source, payment_hash)),
7439                                         _ => None,
7440                                 }
7441                         })
7442                         .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
7443         }
7444 }
7445
7446 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
7447 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
7448         pub context: ChannelContext<SP>,
7449         pub unfunded_context: UnfundedChannelContext,
7450 }
7451
7452 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
7453         pub fn new<ES: Deref, F: Deref>(
7454                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
7455                 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
7456                 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
7457         ) -> Result<OutboundV1Channel<SP>, APIError>
7458         where ES::Target: EntropySource,
7459               F::Target: FeeEstimator
7460         {
7461                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
7462                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7463                         // Protocol-level safety check; this should never happen because of
7464                         // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
7465                         return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below \
7466                                 implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
7467                 }
7468
7469                 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
7470                 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7471                 let pubkeys = holder_signer.pubkeys().clone();
7472
7473                 let chan = Self {
7474                         context: ChannelContext::new_for_outbound_channel(
7475                                 fee_estimator,
7476                                 entropy_source,
7477                                 signer_provider,
7478                                 counterparty_node_id,
7479                                 their_features,
7480                                 channel_value_satoshis,
7481                                 push_msat,
7482                                 user_id,
7483                                 config,
7484                                 current_chain_height,
7485                                 outbound_scid_alias,
7486                                 temporary_channel_id,
7487                                 holder_selected_channel_reserve_satoshis,
7488                                 channel_keys_id,
7489                                 holder_signer,
7490                                 pubkeys,
7491                         )?,
7492                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7493                 };
7494                 Ok(chan)
7495         }
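        // Construction sketch (hypothetical caller; all bindings are assumed to be in scope
        // and the numeric values are purely illustrative):
        //
        //     let chan = OutboundV1Channel::new(
        //         &fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
        //         &their_init_features, 1_000_000 /* channel_value_satoshis */,
        //         0 /* push_msat */, user_id, &UserConfig::default(), best_block_height,
        //         outbound_scid_alias, None /* temporary_channel_id */,
        //     )?;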
7496
7497         /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
7498         fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7499                 let counterparty_keys = self.context.build_remote_transaction_keys();
7500                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7501                 let signature = match &self.context.holder_signer {
7502                         // TODO (taproot|arik): move match into calling method for Taproot
7503                         ChannelSignerType::Ecdsa(ecdsa) => {
7504                                 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
7505                                         .map(|(sig, _)| sig).ok()?
7506                         },
7507                         // TODO (taproot|arik)
7508                         #[cfg(taproot)]
7509                         _ => todo!()
7510                 };
7511
7512                 if self.context.signer_pending_funding {
7513                         log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
7514                         self.context.signer_pending_funding = false;
7515                 }
7516
7517                 Some(msgs::FundingCreated {
7518                         temporary_channel_id: self.context.temporary_channel_id.unwrap(),
7519                         funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
7520                         funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
7521                         signature,
7522                         #[cfg(taproot)]
7523                         partial_signature_with_nonce: None,
7524                         #[cfg(taproot)]
7525                         next_local_nonce: None,
7526                 })
7527         }
7528
7529         /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
7530         /// a funding_created message for the remote peer.
7531         /// Panics if called at some time other than immediately after initial handshake, if called twice,
7532         /// or if called on an inbound channel.
7533         /// Note that channel_id changes during this call!
7534         /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
7535         /// If an Err is returned, it is a ChannelError::Close.
7536         pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
7537         -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
7538                 if !self.context.is_outbound() {
7539                         panic!("Tried to create outbound funding_created message on an inbound channel!");
7540                 }
7541                 if !matches!(
7542                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7543                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7544                 ) {
7545                         panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
7546                 }
7547                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7548                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7549                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7550                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7551                 }
7552
7553                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7554                 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7555
7556                 // Now that we're past error-generating stuff, update our local state:
7557
7558                 self.context.channel_state = ChannelState::FundingNegotiated;
7559                 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7560
7561                 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
7562                 // We can skip this if it is a zero-conf channel.
7563                 if funding_transaction.is_coin_base() &&
7564                         self.context.minimum_depth.unwrap_or(0) > 0 &&
7565                         self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
7566                         self.context.minimum_depth = Some(COINBASE_MATURITY);
7567                 }
7568
7569                 self.context.funding_transaction = Some(funding_transaction);
7570                 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
7571
7572                 let funding_created = self.get_funding_created_msg(logger);
7573                 if funding_created.is_none() {
7574                         #[cfg(not(async_signing))] {
7575                                 panic!("Failed to get signature for new funding creation");
7576                         }
7577                         #[cfg(async_signing)] {
7578                                 if !self.context.signer_pending_funding {
7579                                         log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
7580                                         self.context.signer_pending_funding = true;
7581                                 }
7582                         }
7583                 }
7584
7585                 Ok(funding_created)
7586         }
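        // Funding-flow sketch (hypothetical caller; `funding_tx`, `funding_txo` and `logger`
        // are assumed to be in scope). Per the docs above, do NOT broadcast `funding_tx`
        // until funding_signed has been handled successfully:
        //
        //     let funding_created_opt = chan
        //         .get_funding_created(funding_tx, funding_txo, false /* is_batch_funding */, &logger)
        //         .map_err(|(_chan, e)| e)?;
        //     // With the `async_signing` cfg, `None` means the signer is pending and the
        //     // message will be produced later via `signer_maybe_unblocked`.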
7587
7588         /// If we receive an error message, it may only be a rejection of the channel type we tried,
7589         /// not of our ability to open any channel at all. Thus, on error, we should first call this
7590         /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
7591         pub(crate) fn maybe_handle_error_without_close<F: Deref>(
7592                 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
7593         ) -> Result<msgs::OpenChannel, ()>
7594         where
7595                 F::Target: FeeEstimator
7596         {
7597                 self.context.maybe_downgrade_channel_features(fee_estimator)?;
7598                 Ok(self.get_open_channel(chain_hash))
7599         }
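        // Error-handling sketch (hypothetical caller) of the flow described above: on a peer
        // `error` message, first try downgrading the channel type and re-sending open_channel.
        //
        //     match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
        //         Ok(open_channel_msg) => { /* re-queue `open_channel_msg` for the peer */ },
        //         Err(()) => { /* no acceptable channel type left; fail the channel */ },
        //     }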
7600
7601         /// Returns true if we can resume the channel by sending the [`msgs::OpenChannel`] again.
7602         pub fn is_resumable(&self) -> bool {
7603                 !self.context.have_received_message() &&
7604                         self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER
7605         }
7606
7607         pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
7608                 if !self.context.is_outbound() {
7609                         panic!("Tried to open a channel for an inbound channel?");
7610                 }
7611                 if self.context.have_received_message() {
7612                         panic!("Cannot generate an open_channel after we've moved forward");
7613                 }
7614
7615                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7616                         panic!("Tried to send an open_channel for a channel that has already advanced");
7617                 }
7618
7619                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7620                 let keys = self.context.get_holder_pubkeys();
7621
7622                 msgs::OpenChannel {
7623                         common_fields: msgs::CommonOpenChannelFields {
7624                                 chain_hash,
7625                                 temporary_channel_id: self.context.channel_id,
7626                                 funding_satoshis: self.context.channel_value_satoshis,
7627                                 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7628                                 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7629                                 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7630                                 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw as u32,
7631                                 to_self_delay: self.context.get_holder_selected_contest_delay(),
7632                                 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7633                                 funding_pubkey: keys.funding_pubkey,
7634                                 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7635                                 payment_basepoint: keys.payment_point,
7636                                 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7637                                 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7638                                 first_per_commitment_point,
7639                                 channel_flags: if self.context.config.announced_channel { 1 } else { 0 },
7640                                 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7641                                         Some(script) => script.clone().into_inner(),
7642                                         None => Builder::new().into_script(),
7643                                 }),
7644                                 channel_type: Some(self.context.channel_type.clone()),
7645                         },
7646                         push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
7647                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7648                 }
7649         }
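        // Worked example (illustrative values) of the `push_msat` computation above: for a
        // 1_000_000 sat channel where we keep 999_000_000 msat to ourselves, open_channel
        // pushes the remaining 1_000_000 msat to the counterparty:
        //
        //     let channel_value_satoshis: u64 = 1_000_000;
        //     let value_to_self_msat: u64 = 999_000_000;
        //     assert_eq!(channel_value_satoshis * 1000 - value_to_self_msat, 1_000_000);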
7650
7651         // Message handlers
7652         pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
7653                 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
7654
7655                 // Check sanity of message fields:
7656                 if !self.context.is_outbound() {
7657                         return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
7658                 }
7659                 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
7660                         return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
7661                 }
7662                 if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
7663                         return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
7664                 }
7665                 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
7666                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
7667                 }
7668                 if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
7669                         return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
7670                 }
7671                 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
7672                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
7673                                 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
7674                 }
7675                 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
7676                 if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
7677                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
7678                 }
7679                 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
7680                 if msg.common_fields.to_self_delay > max_delay_acceptable {
7681                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
7682                 }
7683                 if msg.common_fields.max_accepted_htlcs < 1 {
7684                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
7685                 }
7686                 if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
7687                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
7688                 }
7689
7690                 // Now check against optional parameters as set by config...
7691                 if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
7692                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
7693                 }
7694                 if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
7695                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
7696                 }
7697                 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
7698                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
7699                 }
7700                 if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
7701                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
7702                 }
7703                 if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7704                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7705                 }
7706                 if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
7707                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
7708                 }
7709                 if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
7710                         return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
7711                 }
7712
7713                 if let Some(ty) = &msg.common_fields.channel_type {
7714                         if *ty != self.context.channel_type {
7715                                 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
7716                         }
7717                 } else if their_features.supports_channel_type() {
7718                         // Assume they've accepted the channel type as they said they understand it.
7719                 } else {
7720                         let channel_type = ChannelTypeFeatures::from_init(&their_features);
7721                         if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7722                                 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7723                         }
7724                         self.context.channel_type = channel_type.clone();
7725                         self.context.channel_transaction_parameters.channel_type_features = channel_type;
7726                 }
7727
7728                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7729                         match &msg.common_fields.shutdown_scriptpubkey {
7730                                 &Some(ref script) => {
7731                                         // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything here.
7732                                         if script.is_empty() {
7733                                                 None
7734                                         } else {
7735                                                 if !script::is_bolt2_compliant(&script, their_features) {
7736                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
7737                                                 }
7738                                                 Some(script.clone())
7739                                         }
7740                                 },
7741                                 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel.
7742                                 &None => {
7743                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but didn't provide a script. Use a 0-length script to opt out".to_owned()));
7744                                 }
7745                         }
7746                 } else { None };
7747
7748                 self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
7749                 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
7750                 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
7751                 self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
7752                 self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
7753
7754                 if peer_limits.trust_own_funding_0conf {
7755                         self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
7756                 } else {
7757                         self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
7758                 }
7759
7760                 let counterparty_pubkeys = ChannelPublicKeys {
7761                         funding_pubkey: msg.common_fields.funding_pubkey,
7762                         revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7763                         payment_point: msg.common_fields.payment_basepoint,
7764                         delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7765                         htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7766                 };
7767
7768                 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
7769                         selected_contest_delay: msg.common_fields.to_self_delay,
7770                         pubkeys: counterparty_pubkeys,
7771                 });
7772
7773                 self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
7774                 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
7775
7776                 self.context.channel_state = ChannelState::NegotiatingFunding(
7777                         NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7778                 );
7779                 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
7780
7781                 Ok(())
7782         }
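        // Handshake sketch (hypothetical caller): feed the peer's accept_channel through the
        // checks above, treating any error as fatal for this channel only.
        //
        //     if let Err(e) = chan.accept_channel(&accept_msg, &config.channel_handshake_limits, &their_init_features) {
        //         // all failures above are ChannelError::Close: error the peer and drop the channel
        //     }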
7783
7784         /// Handles a funding_signed message from the remote end.
7785         /// If this call is successful, broadcast the funding transaction (and not before!)
7786         pub fn funding_signed<L: Deref>(
7787                 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
7788         ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
7789         where
7790                 L::Target: Logger
7791         {
7792                 if !self.context.is_outbound() {
7793                         return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
7794                 }
7795                 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
7796                         return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
7797                 }
7798                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7799                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7800                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7801                         panic!("Should not have advanced channel commitment tx numbers prior to funding_signed");
7802                 }
7803
7804                 let funding_script = self.context.get_funding_redeemscript();
7805
7806                 let counterparty_keys = self.context.build_remote_transaction_keys();
7807                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7808                 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
7809                 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
7810
7811                 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
7812                         &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
7813
7814                 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7815                 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
7816                 {
7817                         let trusted_tx = initial_commitment_tx.trust();
7818                         let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7819                         let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7820                         // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
7821                         if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
7822                                 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
7823                         }
7824                 }
7825
7826                 let holder_commitment_tx = HolderCommitmentTransaction::new(
7827                         initial_commitment_tx,
7828                         msg.signature,
7829                         Vec::new(),
7830                         &self.context.get_holder_pubkeys().funding_pubkey,
7831                         self.context.counterparty_funding_pubkey()
7832                 );
7833
7834                 let validated =
7835                         self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
7836                 if validated.is_err() {
7837                         return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7838                 }
7839
7840                 let funding_redeemscript = self.context.get_funding_redeemscript();
7841                 let funding_txo = self.context.get_funding_txo().unwrap();
7842                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7843                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7844                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7845                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7846                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7847                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7848                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
7849                                                           &self.context.destination_script, (funding_txo, funding_txo_script),
7850                                                           &self.context.channel_transaction_parameters,
7851                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
7852                                                           obscure_factor,
7853                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7854                 channel_monitor.provide_initial_counterparty_commitment_tx(
7855                         counterparty_initial_bitcoin_tx.txid, Vec::new(),
7856                         self.context.cur_counterparty_commitment_transaction_number,
7857                         self.context.counterparty_cur_commitment_point.unwrap(),
7858                         counterparty_initial_commitment_tx.feerate_per_kw(),
7859                         counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7860                         counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7861
7862                 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We haven't had any monitor(s) yet whose update could fail!
7863                 if self.context.is_batch_funding() {
7864                         self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
7865                 } else {
7866                         self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7867                 }
7868                 self.context.cur_holder_commitment_transaction_number -= 1;
7869                 self.context.cur_counterparty_commitment_transaction_number -= 1;
7870
7871                 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
7872
7873                 let mut channel = Channel {
7874                         context: self.context,
7875                         #[cfg(any(dual_funding, splicing))]
7876                         dual_funding_channel_context: None,
7877                 };
7878
7879                 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7880                 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7881                 Ok((channel, channel_monitor))
7882         }
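        // Completion sketch (hypothetical caller): success consumes the OutboundV1Channel and
        // yields a funded `Channel` plus its `ChannelMonitor`; only after persisting the
        // monitor is it safe to broadcast the funding transaction.
        //
        //     match chan.funding_signed(&funding_signed_msg, best_block, &signer_provider, &logger) {
        //         Ok((channel, channel_monitor)) => { /* persist monitor, then broadcast */ },
        //         Err((chan, e)) => { /* still unfunded; handle `e`, possibly closing */ },
        //     }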
7883
7884         /// Indicates that the signer may have some signatures for us, so we should retry if we're
7885         /// blocked.
7886         #[cfg(async_signing)]
7887         pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7888                 if self.context.signer_pending_funding && self.context.is_outbound() {
7889                         log_trace!(logger, "Signer unblocked a funding_created");
7890                         self.get_funding_created_msg(logger)
7891                 } else { None }
7892         }
7893 }
7894
7895 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
7896 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
7897         pub context: ChannelContext<SP>,
7898         pub unfunded_context: UnfundedChannelContext,
7899 }
7900
7901 /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
7902 /// [`msgs::CommonOpenChannelFields`].
7903 pub(super) fn channel_type_from_open_channel(
7904         common_fields: &msgs::CommonOpenChannelFields, their_features: &InitFeatures,
7905         our_supported_features: &ChannelTypeFeatures
7906 ) -> Result<ChannelTypeFeatures, ChannelError> {
7907         if let Some(channel_type) = &common_fields.channel_type {
7908                 if channel_type.supports_any_optional_bits() {
7909                         return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
7910                 }
7911
7912                 // We only support the channel types defined by the `ChannelManager` in
7913                 // `provided_channel_type_features`. The channel type must always support
7914                 // `static_remote_key`.
7915                 if !channel_type.requires_static_remote_key() {
7916                         return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
7917                 }
7918                 // Make sure we support all of the features behind the channel type.
7919                 if !channel_type.is_subset(our_supported_features) {
7920                         return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
7921                 }
7922                 let announced_channel = (common_fields.channel_flags & 1) == 1;
7923                 if channel_type.requires_scid_privacy() && announced_channel {
7924                         return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
7925                 }
7926                 Ok(channel_type.clone())
7927         } else {
7928                 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7929                 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7930                         return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7931                 }
7932                 Ok(channel_type)
7933         }
7934 }
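// Negotiation sketch (hypothetical values) of the fallback rule implemented above: with no
// explicit `channel_type`, the implied type comes from the peer's `init` features and must
// reduce to `static_remote_key` alone.
//
//     let implied = ChannelTypeFeatures::from_init(&their_init_features);
//     let acceptable = implied == ChannelTypeFeatures::only_static_remote_key();
//     // `!acceptable` here corresponds to the ChannelError::Close path above.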
7935
7936 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
7937         /// Creates a new channel from a remote side's request for one.
7938         /// Assumes chain_hash has already been checked and corresponds with what we expect!
7939         pub fn new<ES: Deref, F: Deref, L: Deref>(
7940                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
7941                 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
7942                 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
7943                 current_chain_height: u32, logger: &L, is_0conf: bool,
7944         ) -> Result<InboundV1Channel<SP>, ChannelError>
7945                 where ES::Target: EntropySource,
7946                           F::Target: FeeEstimator,
7947                           L::Target: Logger,
7948         {
7949                 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id), None);
7950
7951                 // First check the channel type is known, failing before we do anything else if we don't
7952                 // support this channel type.
7953                 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
7954
7955                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config);
7956                 let counterparty_pubkeys = ChannelPublicKeys {
7957                         funding_pubkey: msg.common_fields.funding_pubkey,
7958                         revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7959                         payment_point: msg.common_fields.payment_basepoint,
7960                         delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7961                         htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7962                 };
7963
7964                 let chan = Self {
7965                         context: ChannelContext::new_for_inbound_channel(
7966                                 fee_estimator,
7967                                 entropy_source,
7968                                 signer_provider,
7969                                 counterparty_node_id,
7970                                 their_features,
7971                                 user_id,
7972                                 config,
7973                                 current_chain_height,
7974                                 &&logger,
7975                                 is_0conf,
7976                                 0,
7977
7978                                 counterparty_pubkeys,
7979                                 channel_type,
7980                                 holder_selected_channel_reserve_satoshis,
7981                                 msg.channel_reserve_satoshis,
7982                                 msg.push_msat,
7983                                 msg.common_fields.clone(),
7984                         )?,
7985                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7986                 };
7987                 Ok(chan)
7988         }
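        // Inbound-side sketch (hypothetical caller, typically the open_channel handler): a
        // `ChannelError` here maps to an error message for the peer.
        //
        //     let inbound = InboundV1Channel::new(
        //         &fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
        //         &our_supported_features, &their_init_features, &open_channel_msg, user_id,
        //         &config, best_block_height, &logger, false /* is_0conf */,
        //     )?;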
7989
7990         /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7991         /// should be sent back to the counterparty node.
7992         ///
7993         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7994         pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7995                 if self.context.is_outbound() {
7996                         panic!("Tried to send accept_channel for an outbound channel?");
7997                 }
7998                 if !matches!(
7999                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
8000                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
8001                 ) {
8002                         panic!("Tried to send accept_channel after channel had moved forward");
8003                 }
8004                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8005                         panic!("Tried to send an accept_channel for a channel that has already advanced");
8006                 }
8007
8008                 self.generate_accept_channel_message()
8009         }
8010
8011         /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
8012         /// inbound channel. If the intention is to accept an inbound channel, use
8013         /// [`InboundV1Channel::accept_inbound_channel`] instead.
8014         ///
8015         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
8016         fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
8017                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
8018                 let keys = self.context.get_holder_pubkeys();
8019
8020                 msgs::AcceptChannel {
8021                         common_fields: msgs::CommonAcceptChannelFields {
8022                                 temporary_channel_id: self.context.channel_id,
8023                                 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
8024                                 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
8025                                 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
8026                                 minimum_depth: self.context.minimum_depth.unwrap(),
8027                                 to_self_delay: self.context.get_holder_selected_contest_delay(),
8028                                 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
8029                                 funding_pubkey: keys.funding_pubkey,
8030                                 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
8031                                 payment_basepoint: keys.payment_point,
8032                                 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8033                                 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8034                                 first_per_commitment_point,
8035                                 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8036                                         Some(script) => script.clone().into_inner(),
8037                                         None => Builder::new().into_script(),
8038                                 }),
8039                                 channel_type: Some(self.context.channel_type.clone()),
8040                         },
8041                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
8042                         #[cfg(taproot)]
8043                         next_local_nonce: None,
8044                 }
8045         }
8046
8047         /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
8048         /// inbound channel without accepting it.
8049         ///
8050         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
8051         #[cfg(test)]
8052         pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
8053                 self.generate_accept_channel_message()
8054         }
8055
8056         fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
8057                 let funding_script = self.context.get_funding_redeemscript();
8058
8059                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
8060                 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
8061                 let trusted_tx = initial_commitment_tx.trust();
8062                 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
8063                 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
8064                 // They sign the holder commitment transaction...
8065                 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
8066                         log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
8067                         encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
8068                         encode::serialize_hex(&funding_script), &self.context.channel_id());
8069                 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
8070
8071                 Ok(initial_commitment_tx)
8072         }
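        // Verification sketch (hypothetical bindings, mirroring the check above): the peer's
        // initial commitment signature is a SIGHASH_ALL ECDSA signature by their funding key
        // over our first commitment transaction.
        //
        //     let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, channel_value_satoshis);
        //     secp_ctx.verify_ecdsa(&sighash, &their_sig, &counterparty_funding_pubkey)?;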
8073
8074         pub fn funding_created<L: Deref>(
8075                 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
8076         ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
8077         where
8078                 L::Target: Logger
8079         {
8080                 if self.context.is_outbound() {
8081                         return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
8082                 }
8083                 if !matches!(
8084                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
8085                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
8086                 ) {
8087                         // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
8088                         // remember the channel, so it's safe to just send an error_message here and drop the
8089                         // channel.
8090                         return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
8091                 }
8092                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
8093                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
8094                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8095                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
8096                 }
8097
8098                 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
8099                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
8100                 // This is an externally observable change before we finish all our checks. In
8101                 // particular, check_funding_created_signature may fail.
8102                 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
8103
8104                 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
8105                         Ok(res) => res,
8106                         Err(ChannelError::Close(e)) => {
8107                                 self.context.channel_transaction_parameters.funding_outpoint = None;
8108                                 return Err((self, ChannelError::Close(e)));
8109                         },
8110                         Err(e) => {
8111                                 // The only error we know how to handle is ChannelError::Close, so we fall over here
8112                                 // to make sure we don't continue with an inconsistent state.
8113                                 panic!("unexpected error type from check_funding_created_signature {:?}", e);
8114                         }
8115                 };
8116
8117                 let holder_commitment_tx = HolderCommitmentTransaction::new(
8118                         initial_commitment_tx,
8119                         msg.signature,
8120                         Vec::new(),
8121                         &self.context.get_holder_pubkeys().funding_pubkey,
8122                         self.context.counterparty_funding_pubkey()
8123                 );
8124
8125                 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
8126                         return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
8127                 }
8128
8129                 // Now that we're past error-generating stuff, update our local state:
8130
8131                 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
8132                 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
8133                 self.context.cur_counterparty_commitment_transaction_number -= 1;
8134                 self.context.cur_holder_commitment_transaction_number -= 1;
8135
8136                 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
8137
8138                 let funding_redeemscript = self.context.get_funding_redeemscript();
8139                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
8140                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
8141                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
8142                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
8143                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
8144                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
8145                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
8146                                                           &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
8147                                                           &self.context.channel_transaction_parameters,
8148                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
8149                                                           obscure_factor,
8150                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
8151                 channel_monitor.provide_initial_counterparty_commitment_tx(
8152                         counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
8153                         self.context.cur_counterparty_commitment_transaction_number + 1,
8154                         self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
8155                         counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
8156                         counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
8157
8158                 log_info!(logger, "{} funding_signed for peer for channel {}",
8159                         if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
8160
8161                 // Promote the channel to a full-fledged one now that we have updated the state and have a
8162                 // `ChannelMonitor`.
8163                 let mut channel = Channel {
8164                         context: self.context,
8165                         #[cfg(any(dual_funding, splicing))]
8166                         dual_funding_channel_context: None,
8167                 };
8168                 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
8169                 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
8170
8171                 Ok((channel, funding_signed, channel_monitor))
8172         }
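        // Inbound completion sketch (hypothetical caller): success promotes the channel,
        // returning the funding_signed message (if the signer was ready) and the new monitor.
        //
        //     let (channel, funding_signed_opt, channel_monitor) = inbound
        //         .funding_created(&funding_created_msg, best_block, &signer_provider, &logger)
        //         .map_err(|(_chan, e)| e)?;
        //     // persist `channel_monitor` before sending `funding_signed_opt` to the peer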
8173 }
8174
8175 // A not-yet-funded outbound (from holder) channel using V2 channel establishment.
8176 #[cfg(any(dual_funding, splicing))]
8177 pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
8178         pub context: ChannelContext<SP>,
8179         pub unfunded_context: UnfundedChannelContext,
8180         #[cfg(any(dual_funding, splicing))]
8181         pub dual_funding_context: DualFundingChannelContext,
8182 }
8183
8184 #[cfg(any(dual_funding, splicing))]
8185 impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
8186         pub fn new<ES: Deref, F: Deref>(
8187                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
8188                 counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64,
8189                 user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64,
8190                 funding_confirmation_target: ConfirmationTarget,
8191         ) -> Result<OutboundV2Channel<SP>, APIError>
8192         where ES::Target: EntropySource,
8193               F::Target: FeeEstimator,
8194         {
8195                 let channel_keys_id = signer_provider.generate_channel_keys_id(false, funding_satoshis, user_id);
8196                 let holder_signer = signer_provider.derive_channel_signer(funding_satoshis, channel_keys_id);
8197                 let pubkeys = holder_signer.pubkeys().clone();
8198
8199                 let temporary_channel_id = Some(ChannelId::temporary_v2_from_revocation_basepoint(&pubkeys.revocation_basepoint));
8200
8201                 let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8202                         funding_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
8203
8204                 let funding_feerate_sat_per_1000_weight = fee_estimator.bounded_sat_per_1000_weight(funding_confirmation_target);
8205                 let funding_tx_locktime = current_chain_height;
8206
8207                 let chan = Self {
8208                         context: ChannelContext::new_for_outbound_channel(
8209                                 fee_estimator,
8210                                 entropy_source,
8211                                 signer_provider,
8212                                 counterparty_node_id,
8213                                 their_features,
8214                                 funding_satoshis,
8215                                 0,
8216                                 user_id,
8217                                 config,
8218                                 current_chain_height,
8219                                 outbound_scid_alias,
8220                                 temporary_channel_id,
8221                                 holder_selected_channel_reserve_satoshis,
8222                                 channel_keys_id,
8223                                 holder_signer,
8224                                 pubkeys,
8225                         )?,
8226                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
8227                         dual_funding_context: DualFundingChannelContext {
8228                                 our_funding_satoshis: funding_satoshis,
8229                                 their_funding_satoshis: 0,
8230                                 funding_tx_locktime,
8231                                 funding_feerate_sat_per_1000_weight,
8232                         }
8233                 };
8234                 Ok(chan)
8235         }
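        // V2 construction sketch (hypothetical caller; gated on the same `dual_funding`/
        // `splicing` cfgs as this impl). Note there is no `push_msat` in V2 establishment and
        // the funding feerate is derived from a `ConfirmationTarget`:
        //
        //     let chan_v2 = OutboundV2Channel::new(
        //         &fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
        //         &their_init_features, 500_000 /* funding_satoshis */, user_id, &config,
        //         best_block_height, outbound_scid_alias, ConfirmationTarget::NonAnchorChannelFee,
        //     )?;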
8236
8237         /// If we receive an error message, it may only be a rejection of the channel type we tried,
8238         /// not of our ability to open any channel at all. Thus, on error, we should first call this
8239         /// and see if we get a new `OpenChannelV2` message, otherwise the channel is failed.
8240         pub(crate) fn maybe_handle_error_without_close<F: Deref>(
8241                 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
8242         ) -> Result<msgs::OpenChannelV2, ()>
8243         where
8244                 F::Target: FeeEstimator
8245         {
8246                 self.context.maybe_downgrade_channel_features(fee_estimator)?;
8247                 Ok(self.get_open_channel_v2(chain_hash))
8248         }
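
        // A rough sketch of the caller-side retry flow. `chan`, `peer`, and `fail_channel` are
        // hypothetical names, not LDK API:
        //
        //     match chan.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
        //         // A less-demanding channel type was available; retry the open with it.
        //         Ok(open_channel_v2) => peer.send_open_channel_v2(open_channel_v2),
        //         // Nothing left to downgrade to; give up on this channel.
        //         Err(()) => fail_channel(chan),
        //     }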
8249
8250         pub fn get_open_channel_v2(&self, chain_hash: ChainHash) -> msgs::OpenChannelV2 {
8251                 if self.context.have_received_message() {
8252                         debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward");
8253                 }
8254
8255                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8256                         debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced");
8257                 }
8258
8259                 let first_per_commitment_point = self.context.holder_signer.as_ref()
8260                         .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number,
8261                                 &self.context.secp_ctx);
8262                 let second_per_commitment_point = self.context.holder_signer.as_ref()
8263                         .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number - 1,
8264                                 &self.context.secp_ctx);
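                // The two `get_per_commitment_point` calls above rely on commitment numbers
                // counting *down* from `INITIAL_COMMITMENT_NUMBER` (2^48 - 1); the "second" point
                // is thus at index `INITIAL_COMMITMENT_NUMBER - 1`.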
8265                 let keys = self.context.get_holder_pubkeys();
8266
8267                 msgs::OpenChannelV2 {
8268                         common_fields: msgs::CommonOpenChannelFields {
8269                                 chain_hash,
8270                                 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
8271                                 funding_satoshis: self.context.channel_value_satoshis,
8272                                 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
8273                                 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
8274                                 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
8275                                 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
8276                                 to_self_delay: self.context.get_holder_selected_contest_delay(),
8277                                 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
8278                                 funding_pubkey: keys.funding_pubkey,
8279                                 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
8280                                 payment_basepoint: keys.payment_point,
8281                                 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8282                                 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8283                                 first_per_commitment_point,
8284                                 channel_flags: if self.context.config.announced_channel {1} else {0},
8285                                 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8286                                         Some(script) => script.clone().into_inner(),
8287                                         None => Builder::new().into_script(),
8288                                 }),
8289                                 channel_type: Some(self.context.channel_type.clone()),
8290                         },
8291                         funding_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
8292                         second_per_commitment_point,
8293                         locktime: self.dual_funding_context.funding_tx_locktime,
8294                         require_confirmed_inputs: None,
8295                 }
8296         }
8297 }
8298
8299 // A not-yet-funded inbound (from counterparty) channel using V2 channel establishment.
8300 #[cfg(any(dual_funding, splicing))]
8301 pub(super) struct InboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
8302         pub context: ChannelContext<SP>,
8303         pub unfunded_context: UnfundedChannelContext,
8304         pub dual_funding_context: DualFundingChannelContext,
8305 }
8306
8307 #[cfg(any(dual_funding, splicing))]
8308 impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
8309         /// Creates a new dual-funded channel from a remote side's request for one.
8310         /// Assumes chain_hash has already been checked and corresponds with what we expect!
8311         pub fn new<ES: Deref, F: Deref, L: Deref>(
8312                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
8313                 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
8314                 their_features: &InitFeatures, msg: &msgs::OpenChannelV2, funding_satoshis: u64, user_id: u128,
8315                 config: &UserConfig, current_chain_height: u32, logger: &L,
8316         ) -> Result<InboundV2Channel<SP>, ChannelError>
8317                 where ES::Target: EntropySource,
8318                           F::Target: FeeEstimator,
8319                           L::Target: Logger,
8320         {
8321                 let channel_value_satoshis = funding_satoshis.saturating_add(msg.common_fields.funding_satoshis);
8322                 let counterparty_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8323                         channel_value_satoshis, msg.common_fields.dust_limit_satoshis);
8324                 let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8325                         channel_value_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
8326
8327                 // First, check that the channel type is known, failing before we do anything else if
8328                 // we don't support this channel type.
8329                 if msg.common_fields.channel_type.is_none() {
8330                         return Err(ChannelError::Close(format!("Rejecting V2 channel {} missing channel_type",
8331                                 msg.common_fields.temporary_channel_id)))
8332                 }
8333                 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
8334
8335                 let counterparty_pubkeys = ChannelPublicKeys {
8336                         funding_pubkey: msg.common_fields.funding_pubkey,
8337                         revocation_basepoint: RevocationBasepoint(msg.common_fields.revocation_basepoint),
8338                         payment_point: msg.common_fields.payment_basepoint,
8339                         delayed_payment_basepoint: DelayedPaymentBasepoint(msg.common_fields.delayed_payment_basepoint),
8340                         htlc_basepoint: HtlcBasepoint(msg.common_fields.htlc_basepoint)
8341                 };
8342
8343                 let mut context = ChannelContext::new_for_inbound_channel(
8344                         fee_estimator,
8345                         entropy_source,
8346                         signer_provider,
8347                         counterparty_node_id,
8348                         their_features,
8349                         user_id,
8350                         config,
8351                         current_chain_height,
8352                         logger,
8353                         false,
8354
8355                         funding_satoshis,
8356
8357                         counterparty_pubkeys,
8358                         channel_type,
8359                         holder_selected_channel_reserve_satoshis,
8360                         counterparty_selected_channel_reserve_satoshis,
8361                         0 /* push_msat not used in dual-funding */,
8362                         msg.common_fields.clone(),
8363                 )?;
8364                 let channel_id = ChannelId::v2_from_revocation_basepoints(
8365                         &context.get_holder_pubkeys().revocation_basepoint,
8366                         &context.get_counterparty_pubkeys().revocation_basepoint);
8367                 context.channel_id = channel_id;
8368
8369                 let chan = Self {
8370                         context,
8371                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
8372                         dual_funding_context: DualFundingChannelContext {
8373                                 our_funding_satoshis: funding_satoshis,
8374                                 their_funding_satoshis: msg.common_fields.funding_satoshis,
8375                                 funding_tx_locktime: msg.locktime,
8376                                 funding_feerate_sat_per_1000_weight: msg.funding_feerate_sat_per_1000_weight,
8377                         }
8378                 };
8379
8380                 Ok(chan)
8381         }
8382
8383         /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannelV2`] message which
8384         /// should be sent back to the counterparty node.
8385         ///
8386         /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8387         pub fn accept_inbound_dual_funded_channel(&mut self) -> msgs::AcceptChannelV2 {
8388                 if self.context.is_outbound() {
8389                         debug_assert!(false, "Tried to send accept_channel for an outbound channel?");
8390                 }
8391                 if !matches!(
8392                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
8393                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
8394                 ) {
8395                         debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward");
8396                 }
8397                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8398                         debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced");
8399                 }
8400
8401                 self.generate_accept_channel_v2_message()
8402         }
8403
8404         /// This function is used to explicitly generate a [`msgs::AcceptChannelV2`] message for an
8405         /// inbound dual-funded channel. If the intention is to accept an inbound channel, use
8406         /// [`InboundV2Channel::accept_inbound_dual_funded_channel`] instead.
8407         ///
8408         /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8409         fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
8410                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8411                         self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
8412                 let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8413                         self.context.cur_holder_commitment_transaction_number - 1, &self.context.secp_ctx);
8414                 let keys = self.context.get_holder_pubkeys();
8415
8416                 msgs::AcceptChannelV2 {
8417                         common_fields: msgs::CommonAcceptChannelFields {
8418                                 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
8419                                 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
8420                                 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
8421                                 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
8422                                 minimum_depth: self.context.minimum_depth.unwrap(),
8423                                 to_self_delay: self.context.get_holder_selected_contest_delay(),
8424                                 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
8425                                 funding_pubkey: keys.funding_pubkey,
8426                                 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
8427                                 payment_basepoint: keys.payment_point,
8428                                 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8429                                 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8430                                 first_per_commitment_point,
8431                                 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8432                                         Some(script) => script.clone().into_inner(),
8433                                         None => Builder::new().into_script(),
8434                                 }),
8435                                 channel_type: Some(self.context.channel_type.clone()),
8436                         },
8437                         funding_satoshis: self.dual_funding_context.our_funding_satoshis,
8438                         second_per_commitment_point,
8439                         require_confirmed_inputs: None,
8440                 }
8441         }
8442
8443         /// Enables the possibility for tests to extract a [`msgs::AcceptChannelV2`] message for an
8444         /// inbound channel without accepting it.
8445         ///
8446         /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8447         #[cfg(test)]
8448         pub fn get_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
8449                 self.generate_accept_channel_v2_message()
8450         }
8451 }
8452
8453 // Unfunded channel utilities
8454
8455 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
8456         // The default channel type (ie the first one we try) depends on whether the channel is
8457         // public - if it is, we just go with `only_static_remote_key` as it's the only option
8458         // available. If it's private, we first try `scid_privacy` as it provides better privacy
8459         // with no other changes, and fall back to `only_static_remote_key`.
8460         let mut ret = ChannelTypeFeatures::only_static_remote_key();
8461         if !config.channel_handshake_config.announced_channel &&
8462                 config.channel_handshake_config.negotiate_scid_privacy &&
8463                 their_features.supports_scid_privacy() {
8464                 ret.set_scid_privacy_required();
8465         }
8466
8467         // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
8468         // set it now. If they don't understand it, we'll fall back to our default of
8469         // `only_static_remote_key`.
8470         if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
8471                 their_features.supports_anchors_zero_fee_htlc_tx() {
8472                 ret.set_anchors_zero_fee_htlc_tx_required();
8473         }
8474
8475         ret
8476 }
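
// For example: with `announced_channel == false`, `negotiate_scid_privacy == true`, and a peer
// advertising `scid_privacy` support, the initial type is `static_remote_key | scid_privacy`
// (plus `anchors_zero_fee_htlc_tx` if both sides opted in). If the peer rejects that type,
// `maybe_downgrade_channel_features` strips bits until we bottom out at `only_static_remote_key`.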
8477
8478 const SERIALIZATION_VERSION: u8 = 4;
8479 const MIN_SERIALIZATION_VERSION: u8 = 3;
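// Version 4 is only required when a pending inbound HTLC is still in the
// `InboundHTLCResolution::Pending` state; otherwise `Channel::write` below selects
// `MIN_SERIALIZATION_VERSION` so older readers can still deserialize the channel.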
8480
8481 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
8482         (0, FailRelay),
8483         (1, FailMalformed),
8484         (2, Fulfill),
8485 );
8486
8487 impl Writeable for ChannelUpdateStatus {
8488         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8489                 // We only care about writing out the current state as it was announced, ie only either
8490                 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
8491                 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
8492                 match self {
8493                         ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
8494                         ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
8495                         ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
8496                         ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
8497                 }
8498                 Ok(())
8499         }
8500 }
8501
8502 impl Readable for ChannelUpdateStatus {
8503         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8504                 Ok(match <u8 as Readable>::read(reader)? {
8505                         0 => ChannelUpdateStatus::Enabled,
8506                         1 => ChannelUpdateStatus::Disabled,
8507                         _ => return Err(DecodeError::InvalidValue),
8508                 })
8509         }
8510 }
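
// As a concrete round trip: `ChannelUpdateStatus::DisabledStaged(_)` is written as 0 and read
// back as `ChannelUpdateStatus::Enabled` - the staged-but-not-yet-announced transition is
// intentionally forgotten across restarts.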
8511
8512 impl Writeable for AnnouncementSigsState {
8513         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8514                 // We only care about writing out the current state as if we had just disconnected, at
8515                 // which point we always set anything but AnnouncementSigsReceived to NotSent.
8516                 match self {
8517                         AnnouncementSigsState::NotSent => 0u8.write(writer),
8518                         AnnouncementSigsState::MessageSent => 0u8.write(writer),
8519                         AnnouncementSigsState::Committed => 0u8.write(writer),
8520                         AnnouncementSigsState::PeerReceived => 1u8.write(writer),
8521                 }
8522         }
8523 }
8524
8525 impl Readable for AnnouncementSigsState {
8526         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8527                 Ok(match <u8 as Readable>::read(reader)? {
8528                         0 => AnnouncementSigsState::NotSent,
8529                         1 => AnnouncementSigsState::PeerReceived,
8530                         _ => return Err(DecodeError::InvalidValue),
8531                 })
8532         }
8533 }
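
// Similarly, `MessageSent` and `Committed` are written as 0 and read back as `NotSent`, so the
// announcement-signatures exchange restarts after a reload just as it would after a disconnect.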
8534
8535 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
8536         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8537                 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
8538                 // called.
8539
8540                 let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state {
8541                         InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)|
8542                                 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
8543                                 matches!(htlc_resolution, InboundHTLCResolution::Pending { .. })
8544                         },
8545                         _ => false,
8546                 }) {
8547                         SERIALIZATION_VERSION
8548                 } else {
8549                         MIN_SERIALIZATION_VERSION
8550                 };
8551                 write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION);
8552
8553                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8554                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
8555                 // the low bytes now and the optional high bytes later.
8556                 let user_id_low = self.context.user_id as u64;
8557                 user_id_low.write(writer)?;
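                // As a worked example, a `user_id` of `(7u128 << 64) | 3` round-trips as:
                //   user_id_low  == 3 (written here, readable by all versions)
                //   user_id_high == 7 (written as an odd TLV below, ignored by pre-0.0.113 readers)
                //   user_id      == ((user_id_high as u128) << 64) | (user_id_low as u128)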
8558
8559                 // Version 1 deserializers expected to read parts of the config object here. Version 2
8560                 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
8561                 // `minimum_depth` we simply write dummy values here.
8562                 writer.write_all(&[0; 8])?;
8563
8564                 self.context.channel_id.write(writer)?;
8565                 {
8566                         let mut channel_state = self.context.channel_state;
8567                         if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
8568                                 channel_state.set_peer_disconnected();
8569                         } else {
8570                                 debug_assert!(false, "Pre-funded/shutdown channels should not be written");
8571                         }
8572                         channel_state.to_u32().write(writer)?;
8573                 }
8574                 self.context.channel_value_satoshis.write(writer)?;
8575
8576                 self.context.latest_monitor_update_id.write(writer)?;
8577
8578                 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
8579                 // deserialized from that format.
8580                 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
8581                         Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
8582                         None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
8583                 }
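                // The all-zeroes sentinel above works because 33 zero bytes are not a valid
                // compressed secp256k1 public key: the read side's `PublicKey` parse fails and is
                // mapped back to `None`.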
8584                 self.context.destination_script.write(writer)?;
8585
8586                 self.context.cur_holder_commitment_transaction_number.write(writer)?;
8587                 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
8588                 self.context.value_to_self_msat.write(writer)?;
8589
8590                 let mut dropped_inbound_htlcs = 0;
8591                 for htlc in self.context.pending_inbound_htlcs.iter() {
8592                         if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
8593                                 dropped_inbound_htlcs += 1;
8594                         }
8595                 }
8596                 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
8597                 for htlc in self.context.pending_inbound_htlcs.iter() {
8598                         if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
8599                                 continue; // Drop
8600                         }
8601                         htlc.htlc_id.write(writer)?;
8602                         htlc.amount_msat.write(writer)?;
8603                         htlc.cltv_expiry.write(writer)?;
8604                         htlc.payment_hash.write(writer)?;
8605                         match &htlc.state {
8606                                 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
8607                                 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => {
8608                                         1u8.write(writer)?;
8609                                         if version_to_write <= 3 {
8610                                                 if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
8611                                                         pending_htlc_status.write(writer)?;
8612                                                 } else {
8613                                                         panic!();
8614                                                 }
8615                                         } else {
8616                                                 htlc_resolution.write(writer)?;
8617                                         }
8618                                 },
8619                                 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
8620                                         2u8.write(writer)?;
8621                                         if version_to_write <= 3 {
8622                                                 if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
8623                                                         pending_htlc_status.write(writer)?;
8624                                                 } else {
8625                                                         panic!();
8626                                                 }
8627                                         } else {
8628                                                 htlc_resolution.write(writer)?;
8629                                         }
8630                                 },
8631                                 &InboundHTLCState::Committed => {
8632                                         3u8.write(writer)?;
8633                                 },
8634                                 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
8635                                         4u8.write(writer)?;
8636                                         removal_reason.write(writer)?;
8637                                 },
8638                         }
8639                 }
8640
8641                 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
8642                 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
8643                 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8644
8645                 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
8646                 for htlc in self.context.pending_outbound_htlcs.iter() {
8647                         htlc.htlc_id.write(writer)?;
8648                         htlc.amount_msat.write(writer)?;
8649                         htlc.cltv_expiry.write(writer)?;
8650                         htlc.payment_hash.write(writer)?;
8651                         htlc.source.write(writer)?;
8652                         match &htlc.state {
8653                                 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
8654                                         0u8.write(writer)?;
8655                                         onion_packet.write(writer)?;
8656                                 },
8657                                 &OutboundHTLCState::Committed => {
8658                                         1u8.write(writer)?;
8659                                 },
8660                                 &OutboundHTLCState::RemoteRemoved(_) => {
8661                                         // Treat this as Committed because we haven't received the CS - they'll
8662                                         // resend the claim/fail on reconnect, along with (hopefully) the missing CS.
8663                                         1u8.write(writer)?;
8664                                 },
8665                                 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
8666                                         3u8.write(writer)?;
8667                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
8668                                                 preimages.push(preimage);
8669                                         }
8670                                         let reason: Option<&HTLCFailReason> = outcome.into();
8671                                         reason.write(writer)?;
8672                                 }
8673                                 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
8674                                         4u8.write(writer)?;
8675                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
8676                                                 preimages.push(preimage);
8677                                         }
8678                                         let reason: Option<&HTLCFailReason> = outcome.into();
8679                                         reason.write(writer)?;
8680                                 }
8681                         }
8682                         pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
8683                         pending_outbound_blinding_points.push(htlc.blinding_point);
8684                 }
8685
8686                 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
8687                 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8688                 // Vec of (htlc_id, failure_code, sha256_of_onion)
8689                 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
8690                 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
8691                 for update in self.context.holding_cell_htlc_updates.iter() {
8692                         match update {
8693                                 &HTLCUpdateAwaitingACK::AddHTLC {
8694                                         ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
8695                                         blinding_point, skimmed_fee_msat,
8696                                 } => {
8697                                         0u8.write(writer)?;
8698                                         amount_msat.write(writer)?;
8699                                         cltv_expiry.write(writer)?;
8700                                         payment_hash.write(writer)?;
8701                                         source.write(writer)?;
8702                                         onion_routing_packet.write(writer)?;
8703
8704                                         holding_cell_skimmed_fees.push(skimmed_fee_msat);
8705                                         holding_cell_blinding_points.push(blinding_point);
8706                                 },
8707                                 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
8708                                         1u8.write(writer)?;
8709                                         payment_preimage.write(writer)?;
8710                                         htlc_id.write(writer)?;
8711                                 },
8712                                 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
8713                                         2u8.write(writer)?;
8714                                         htlc_id.write(writer)?;
8715                                         err_packet.write(writer)?;
8716                                 }
8717                                 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
8718                                         htlc_id, failure_code, sha256_of_onion
8719                                 } => {
8720                                         // We don't want to break downgrading by adding a new variant, so write a dummy
8721                                         // `::FailHTLC` variant and write the real malformed error as an optional TLV.
8722                                         malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
8723
8724                                         let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
8725                                         2u8.write(writer)?;
8726                                         htlc_id.write(writer)?;
8727                                         dummy_err_packet.write(writer)?;
8728                                 }
8729                         }
8730                 }
8731
8732                 match self.context.resend_order {
8733                         RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
8734                         RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
8735                 }
8736
8737                 self.context.monitor_pending_channel_ready.write(writer)?;
8738                 self.context.monitor_pending_revoke_and_ack.write(writer)?;
8739                 self.context.monitor_pending_commitment_signed.write(writer)?;
8740
8741                 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
8742                 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
8743                         pending_forward.write(writer)?;
8744                         htlc_id.write(writer)?;
8745                 }
8746
8747                 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
8748                 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
8749                         htlc_source.write(writer)?;
8750                         payment_hash.write(writer)?;
8751                         fail_reason.write(writer)?;
8752                 }
8753
8754                 if self.context.is_outbound() {
8755                         self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
8756                 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
8757                         Some(feerate).write(writer)?;
8758                 } else {
8759                         // As for inbound HTLCs, if the update was only announced and never committed in a
8760                         // commitment_signed, drop it.
8761                         None::<u32>.write(writer)?;
8762                 }
8763                 self.context.holding_cell_update_fee.write(writer)?;
8764
8765                 self.context.next_holder_htlc_id.write(writer)?;
8766                 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
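                // `RemoteAnnounced` HTLCs were dropped above (the peer will re-send them with the
                // same IDs on reconnect), so roll the counterparty HTLC ID counter back to match.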
8767                 self.context.update_time_counter.write(writer)?;
8768                 self.context.feerate_per_kw.write(writer)?;
8769
8770                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
8771                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
8772                 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
8773                 // consider the stale state on reload.
8774                 0u8.write(writer)?;
8775
8776                 self.context.funding_tx_confirmed_in.write(writer)?;
8777                 self.context.funding_tx_confirmation_height.write(writer)?;
8778                 self.context.short_channel_id.write(writer)?;
8779
8780                 self.context.counterparty_dust_limit_satoshis.write(writer)?;
8781                 self.context.holder_dust_limit_satoshis.write(writer)?;
8782                 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
8783
8784                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8785                 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
8786
8787                 self.context.counterparty_htlc_minimum_msat.write(writer)?;
8788                 self.context.holder_htlc_minimum_msat.write(writer)?;
8789                 self.context.counterparty_max_accepted_htlcs.write(writer)?;
8790
8791                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8792                 self.context.minimum_depth.unwrap_or(0).write(writer)?;
8793
8794                 match &self.context.counterparty_forwarding_info {
8795                         Some(info) => {
8796                                 1u8.write(writer)?;
8797                                 info.fee_base_msat.write(writer)?;
8798                                 info.fee_proportional_millionths.write(writer)?;
8799                                 info.cltv_expiry_delta.write(writer)?;
8800                         },
8801                         None => 0u8.write(writer)?
8802                 }
8803
8804                 self.context.channel_transaction_parameters.write(writer)?;
8805                 self.context.funding_transaction.write(writer)?;
8806
8807                 self.context.counterparty_cur_commitment_point.write(writer)?;
8808                 self.context.counterparty_prev_commitment_point.write(writer)?;
8809                 self.context.counterparty_node_id.write(writer)?;
8810
8811                 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
8812
8813                 self.context.commitment_secrets.write(writer)?;
8814
8815                 self.context.channel_update_status.write(writer)?;
8816
8817                 #[cfg(any(test, fuzzing))]
8818                 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
8819                 #[cfg(any(test, fuzzing))]
8820                 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
8821                         htlc.write(writer)?;
8822                 }
8823
8824                 // If the channel type is something other than only-static-remote-key, then we need to have
8825                 // older clients fail to deserialize this channel at all. If the type is
8826                 // only-static-remote-key, we simply consider it "default" and don't write the channel type
8827                 // out at all.
8828                 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
8829                         Some(&self.context.channel_type) } else { None };
8830
8831                 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
8832                 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to a
8833                 // percentage of the channel value other than the 10% which older versions of LDK
8834                 // used before the percentage was made configurable.
8835                 let serialized_holder_selected_reserve =
8836                         if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
8837                         { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
8838
8839                 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
8840                 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
8841                 let serialized_holder_htlc_max_in_flight =
8842                         if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
8843                         { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
8844
8845                 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
8846                 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
8847
8848                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8849                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
8850                 // we write the high bytes as an option here.
8851                 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
8852
8853                 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
8854
8855                 let mut monitor_pending_update_adds = None;
8856                 if !self.context.monitor_pending_update_adds.is_empty() {
8857                         monitor_pending_update_adds = Some(&self.context.monitor_pending_update_adds);
8858                 }
8859
8860                 // `current_point` will become optional when async signing is implemented.
8861                 let cur_holder_commitment_point = Some(self.context.holder_commitment_point.current_point());
8862                 let next_holder_commitment_point = self.context.holder_commitment_point.next_point();
8863
8864                 write_tlv_fields!(writer, {
8865                         (0, self.context.announcement_sigs, option),
8866                         // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
8867                         // default value instead of being Option<>al. Thus, to maintain compatibility we write
8868                         // them twice, once with their original default values above, and once as an option
8869                         // here. On the read side, old versions will simply ignore the odd-type entries here,
8870                         // and new versions map the default values to None and allow the TLV entries here to
8871                         // override that.
8872                         (1, self.context.minimum_depth, option),
8873                         (2, chan_type, option),
8874                         (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
8875                         (4, serialized_holder_selected_reserve, option),
8876                         (5, self.context.config, required),
8877                         (6, serialized_holder_htlc_max_in_flight, option),
8878                         (7, self.context.shutdown_scriptpubkey, option),
8879                         (8, self.context.blocked_monitor_updates, optional_vec),
8880                         (9, self.context.target_closing_feerate_sats_per_kw, option),
8881                         (10, monitor_pending_update_adds, option), // Added in 0.0.122
8882                         (11, self.context.monitor_pending_finalized_fulfills, required_vec),
8883                         (13, self.context.channel_creation_height, required),
8884                         (15, preimages, required_vec),
8885                         (17, self.context.announcement_sigs_state, required),
8886                         (19, self.context.latest_inbound_scid_alias, option),
8887                         (21, self.context.outbound_scid_alias, required),
8888                         (23, channel_ready_event_emitted, option),
8889                         (25, user_id_high_opt, option),
8890                         (27, self.context.channel_keys_id, required),
8891                         (28, holder_max_accepted_htlcs, option),
8892                         (29, self.context.temporary_channel_id, option),
8893                         (31, channel_pending_event_emitted, option),
8894                         (35, pending_outbound_skimmed_fees, optional_vec),
8895                         (37, holding_cell_skimmed_fees, optional_vec),
8896                         (38, self.context.is_batch_funding, option),
8897                         (39, pending_outbound_blinding_points, optional_vec),
8898                         (41, holding_cell_blinding_points, optional_vec),
8899                         (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8900                         (45, cur_holder_commitment_point, option),
8901                         (47, next_holder_commitment_point, option),
8902                         (49, self.context.local_initiated_shutdown, option), // Added in 0.0.122
8903                 });
8904
8905                 Ok(())
8906         }
8907 }
8908
8909 const MAX_ALLOC_SIZE: usize = 64*1024;
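// `MAX_ALLOC_SIZE` caps up-front `Vec` preallocations so a corrupt or malicious length prefix
// can't trigger a huge allocation; anything longer is grown incrementally (see the 1KB chunked
// read of the legacy signer bytes below).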
8910 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
8911                 where
8912                         ES::Target: EntropySource,
8913                         SP::Target: SignerProvider
8914 {
8915         fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
8916                 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
8917                 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
8918
8919                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8920                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
8921                 // the low bytes now and the high bytes later.
8922                 let user_id_low: u64 = Readable::read(reader)?;
8923
8924                 let mut config = Some(LegacyChannelConfig::default());
8925                 if ver == 1 {
8926                         // Read the old serialization of the ChannelConfig from version 0.0.98.
8927                         config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
8928                         config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
8929                         config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
8930                         config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
8931                 } else {
8932                         // Read the 8 bytes of backwards-compatibility ChannelConfig data.
8933                         let mut _val: u64 = Readable::read(reader)?;
8934                 }
8935
8936                 let channel_id = Readable::read(reader)?;
8937                 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
8938                 let channel_value_satoshis = Readable::read(reader)?;
8939
8940                 let latest_monitor_update_id = Readable::read(reader)?;
8941
8942                 let mut keys_data = None;
8943                 if ver <= 2 {
8944                         // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
8945                         // the `channel_keys_id` TLV is present below.
8946                         let keys_len: u32 = Readable::read(reader)?;
8947                         keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
8948                         while keys_data.as_ref().unwrap().len() != keys_len as usize {
8949                                 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
8950                                 let mut data = [0; 1024];
8951                                 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
8952                                 reader.read_exact(read_slice)?;
8953                                 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
8954                         }
8955                 }
8956
8957                 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
8958                 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
8959                         Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
8960                         Err(_) => None,
8961                 };
8962                 let destination_script = Readable::read(reader)?;
8963
8964                 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
8965                 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
8966                 let value_to_self_msat = Readable::read(reader)?;
8967
8968                 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
8969
8970                 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8971                 for _ in 0..pending_inbound_htlc_count {
8972                         pending_inbound_htlcs.push(InboundHTLCOutput {
8973                                 htlc_id: Readable::read(reader)?,
8974                                 amount_msat: Readable::read(reader)?,
8975                                 cltv_expiry: Readable::read(reader)?,
8976                                 payment_hash: Readable::read(reader)?,
8977                                 state: match <u8 as Readable>::read(reader)? {
8978                                         1 => {
8979                                                 let resolution = if ver <= 3 {
8980                                                         InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
8981                                                 } else {
8982                                                         Readable::read(reader)?
8983                                                 };
8984                                                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution)
8985                                         },
8986                                         2 => {
8987                                                 let resolution = if ver <= 3 {
8988                                                         InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
8989                                                 } else {
8990                                                         Readable::read(reader)?
8991                                                 };
8992                                                 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution)
8993                                         },
8994                                         3 => InboundHTLCState::Committed,
8995                                         4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
8996                                         _ => return Err(DecodeError::InvalidValue),
8997                                 },
8998                         });
8999                 }
9000
9001                 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
9002                 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
9003                 for _ in 0..pending_outbound_htlc_count {
9004                         pending_outbound_htlcs.push(OutboundHTLCOutput {
9005                                 htlc_id: Readable::read(reader)?,
9006                                 amount_msat: Readable::read(reader)?,
9007                                 cltv_expiry: Readable::read(reader)?,
9008                                 payment_hash: Readable::read(reader)?,
9009                                 source: Readable::read(reader)?,
9010                                 state: match <u8 as Readable>::read(reader)? {
9011                                         0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
9012                                         1 => OutboundHTLCState::Committed,
9013                                         2 => {
9014                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
9015                                                 OutboundHTLCState::RemoteRemoved(option.into())
9016                                         },
9017                                         3 => {
9018                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
9019                                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
9020                                         },
9021                                         4 => {
9022                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
9023                                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
9024                                         },
9025                                         _ => return Err(DecodeError::InvalidValue),
9026                                 },
9027                                 skimmed_fee_msat: None,
9028                                 blinding_point: None,
9029                         });
9030                 }
9031
9032                 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
9033                 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
9034                 for _ in 0..holding_cell_htlc_update_count {
9035                         holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
9036                                 0 => HTLCUpdateAwaitingACK::AddHTLC {
9037                                         amount_msat: Readable::read(reader)?,
9038                                         cltv_expiry: Readable::read(reader)?,
9039                                         payment_hash: Readable::read(reader)?,
9040                                         source: Readable::read(reader)?,
9041                                         onion_routing_packet: Readable::read(reader)?,
9042                                         skimmed_fee_msat: None,
9043                                         blinding_point: None,
9044                                 },
9045                                 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
9046                                         payment_preimage: Readable::read(reader)?,
9047                                         htlc_id: Readable::read(reader)?,
9048                                 },
9049                                 2 => HTLCUpdateAwaitingACK::FailHTLC {
9050                                         htlc_id: Readable::read(reader)?,
9051                                         err_packet: Readable::read(reader)?,
9052                                 },
9053                                 _ => return Err(DecodeError::InvalidValue),
9054                         });
9055                 }
9056
9057                 let resend_order = match <u8 as Readable>::read(reader)? {
9058                         0 => RAACommitmentOrder::CommitmentFirst,
9059                         1 => RAACommitmentOrder::RevokeAndACKFirst,
9060                         _ => return Err(DecodeError::InvalidValue),
9061                 };
9062
9063                 let monitor_pending_channel_ready = Readable::read(reader)?;
9064                 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
9065                 let monitor_pending_commitment_signed = Readable::read(reader)?;
9066
9067                 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
9068                 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
9069                 for _ in 0..monitor_pending_forwards_count {
9070                         monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
9071                 }
9072
9073                 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
9074                 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
9075                 for _ in 0..monitor_pending_failures_count {
9076                         monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
9077                 }
9078
9079                 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
9080
9081                 let holding_cell_update_fee = Readable::read(reader)?;
9082
9083                 let next_holder_htlc_id = Readable::read(reader)?;
9084                 let next_counterparty_htlc_id = Readable::read(reader)?;
9085                 let update_time_counter = Readable::read(reader)?;
9086                 let feerate_per_kw = Readable::read(reader)?;
9087
9088                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
9089                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
9090                 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
9091                 // consider the stale state on reload.
9092                 match <u8 as Readable>::read(reader)? {
9093                         0 => {},
9094                         1 => {
9095                                 let _: u32 = Readable::read(reader)?;
9096                                 let _: u64 = Readable::read(reader)?;
9097                                 let _: Signature = Readable::read(reader)?;
9098                         },
9099                         _ => return Err(DecodeError::InvalidValue),
9100                 }
9101
9102                 let funding_tx_confirmed_in = Readable::read(reader)?;
9103                 let funding_tx_confirmation_height = Readable::read(reader)?;
9104                 let short_channel_id = Readable::read(reader)?;
9105
9106                 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
9107                 let holder_dust_limit_satoshis = Readable::read(reader)?;
9108                 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
9109                 let mut counterparty_selected_channel_reserve_satoshis = None;
9110                 if ver == 1 {
9111                         // Read the old serialization from version 0.0.98.
9112                         counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
9113                 } else {
9114                         // Read the 8 bytes of backwards-compatibility data.
9115                         let _dummy: u64 = Readable::read(reader)?;
9116                 }
9117                 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
9118                 let holder_htlc_minimum_msat = Readable::read(reader)?;
9119                 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
9120
9121                 let mut minimum_depth = None;
9122                 if ver == 1 {
9123                         // Read the old serialization from version 0.0.98.
9124                         minimum_depth = Some(Readable::read(reader)?);
9125                 } else {
9126                         // Read the 4 bytes of backwards-compatibility data.
9127                         let _dummy: u32 = Readable::read(reader)?;
9128                 }
9129
9130                 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
9131                         0 => None,
9132                         1 => Some(CounterpartyForwardingInfo {
9133                                 fee_base_msat: Readable::read(reader)?,
9134                                 fee_proportional_millionths: Readable::read(reader)?,
9135                                 cltv_expiry_delta: Readable::read(reader)?,
9136                         }),
9137                         _ => return Err(DecodeError::InvalidValue),
9138                 };
9139
9140                 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
9141                 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
9142
9143                 let counterparty_cur_commitment_point = Readable::read(reader)?;
9144
9145                 let counterparty_prev_commitment_point = Readable::read(reader)?;
9146                 let counterparty_node_id = Readable::read(reader)?;
9147
9148                 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
9149                 let commitment_secrets = Readable::read(reader)?;
9150
9151                 let channel_update_status = Readable::read(reader)?;
9152
9153                 #[cfg(any(test, fuzzing))]
9154                 let mut historical_inbound_htlc_fulfills = new_hash_set();
9155                 #[cfg(any(test, fuzzing))]
9156                 {
9157                         let htlc_fulfills_len: u64 = Readable::read(reader)?;
9158                         for _ in 0..htlc_fulfills_len {
9159                                 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
9160                         }
9161                 }
9162
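                // Note (descriptive, not upstream): pre-TLV serializations stored only the pending
                // feerate, not which state the fee update was in, so we reconstruct it from the
                // funding direction: only the funder sends update_fee, so on an outbound channel the
                // pending fee must be one we sent, and on an inbound channel one we received.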
9163                 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
9164                         Some((feerate, if channel_parameters.is_outbound_from_holder {
9165                                 FeeUpdateState::Outbound
9166                         } else {
9167                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
9168                         }))
9169                 } else {
9170                         None
9171                 };
9172
9173                 let mut announcement_sigs = None;
9174                 let mut target_closing_feerate_sats_per_kw = None;
9175                 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
9176                 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
9177                 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
9178                 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
9179                 // only, so we default to that if none was written.
9180                 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
9181                 let mut channel_creation_height = Some(serialized_height);
9182                 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
9183
9184                 // If we read an old Channel, for simplicity we just treat it as "we never sent an
9185                 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
9186                 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
9187                 let mut latest_inbound_scid_alias = None;
9188                 let mut outbound_scid_alias = None;
9189                 let mut channel_pending_event_emitted = None;
9190                 let mut channel_ready_event_emitted = None;
9191
9192                 let mut user_id_high_opt: Option<u64> = None;
9193                 let mut channel_keys_id: Option<[u8; 32]> = None;
9194                 let mut temporary_channel_id: Option<ChannelId> = None;
9195                 let mut holder_max_accepted_htlcs: Option<u16> = None;
9196
9197                 let mut blocked_monitor_updates = Some(Vec::new());
9198
9199                 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
9200                 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
9201
9202                 let mut is_batch_funding: Option<()> = None;
9203
9204                 let mut local_initiated_shutdown: Option<()> = None;
9205
9206                 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
9207                 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
9208
9209                 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
9210                 let mut monitor_pending_update_adds: Option<Vec<msgs::UpdateAddHTLC>> = None;
9211
9212                 let mut cur_holder_commitment_point_opt: Option<PublicKey> = None;
9213                 let mut next_holder_commitment_point_opt: Option<PublicKey> = None;
9214
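                // Note (descriptive, not upstream): the TLV stream below follows the usual
                // lightning even/odd convention: even-typed fields are required (an unknown even
                // type fails the read), while odd-typed fields are optional and safely skipped by
                // older readers.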
9215                 read_tlv_fields!(reader, {
9216                         (0, announcement_sigs, option),
9217                         (1, minimum_depth, option),
9218                         (2, channel_type, option),
9219                         (3, counterparty_selected_channel_reserve_satoshis, option),
9220                         (4, holder_selected_channel_reserve_satoshis, option),
9221                         (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
9222                         (6, holder_max_htlc_value_in_flight_msat, option),
9223                         (7, shutdown_scriptpubkey, option),
9224                         (8, blocked_monitor_updates, optional_vec),
9225                         (9, target_closing_feerate_sats_per_kw, option),
9226                         (10, monitor_pending_update_adds, option), // Added in 0.0.122
9227                         (11, monitor_pending_finalized_fulfills, optional_vec),
9228                         (13, channel_creation_height, option),
9229                         (15, preimages_opt, optional_vec),
9230                         (17, announcement_sigs_state, option),
9231                         (19, latest_inbound_scid_alias, option),
9232                         (21, outbound_scid_alias, option),
9233                         (23, channel_ready_event_emitted, option),
9234                         (25, user_id_high_opt, option),
9235                         (27, channel_keys_id, option),
9236                         (28, holder_max_accepted_htlcs, option),
9237                         (29, temporary_channel_id, option),
9238                         (31, channel_pending_event_emitted, option),
9239                         (35, pending_outbound_skimmed_fees_opt, optional_vec),
9240                         (37, holding_cell_skimmed_fees_opt, optional_vec),
9241                         (38, is_batch_funding, option),
9242                         (39, pending_outbound_blinding_points_opt, optional_vec),
9243                         (41, holding_cell_blinding_points_opt, optional_vec),
9244                         (43, malformed_htlcs, optional_vec), // Added in 0.0.119
9245                         (45, cur_holder_commitment_point_opt, option),
9246                         (47, next_holder_commitment_point_opt, option),
9247                         (49, local_initiated_shutdown, option),
9248                 });
9249
9250                 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
9251                         let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
9252                         // If we've gotten to the funding stage of the channel, populate the signer with its
9253                         // required channel parameters.
9254                         if channel_state >= ChannelState::FundingNegotiated {
9255                                 holder_signer.provide_channel_parameters(&channel_parameters);
9256                         }
9257                         (channel_keys_id, holder_signer)
9258                 } else {
9259                         // `keys_data` can be `None` if we had corrupted data.
9260                         let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
9261                         let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
9262                         (holder_signer.channel_keys_id(), holder_signer)
9263                 };
9264
9265                 if let Some(preimages) = preimages_opt {
9266                         let mut iter = preimages.into_iter();
9267                         for htlc in pending_outbound_htlcs.iter_mut() {
9268                                 match &htlc.state {
9269                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
9270                                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
9271                                         }
9272                                         OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
9273                                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
9274                                         }
9275                                         _ => {}
9276                                 }
9277                         }
9278                         // We expect all preimages to be consumed above
9279                         if iter.next().is_some() {
9280                                 return Err(DecodeError::InvalidValue);
9281                         }
9282                 }
9283
9284                 let chan_features = channel_type.as_ref().unwrap();
9285                 if !chan_features.is_subset(our_supported_features) {
9286                         // If the channel was written by a new version and negotiated with features we don't
9287                         // understand yet, refuse to read it.
9288                         return Err(DecodeError::UnknownRequiredFeature);
9289                 }
9290
9291                 // ChannelTransactionParameters may have had an empty features set upon deserialization.
9292                 // To account for that, we're proactively setting/overriding the field here.
9293                 channel_parameters.channel_type_features = chan_features.clone();
9294
9295                 let mut secp_ctx = Secp256k1::new();
9296                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
9297
9298                 // `user_id` used to be a single u64 value. In order to remain backwards
9299                 // compatible with versions prior to 0.0.113, the u128 is serialized as two
9300                 // separate u64 values.
9301                 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
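                // Illustrative round trip of the split (the writer splits the u128 the same way):
                //   let lo = user_id as u64;
                //   let hi = (user_id >> 64) as u64;
                //   assert_eq!(user_id, lo as u128 + ((hi as u128) << 64));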
9302
9303                 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
9304
9305                 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
9306                         let mut iter = skimmed_fees.into_iter();
9307                         for htlc in pending_outbound_htlcs.iter_mut() {
9308                                 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
9309                         }
9310                         // We expect all skimmed fees to be consumed above
9311                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9312                 }
9313                 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
9314                         let mut iter = skimmed_fees.into_iter();
9315                         for htlc in holding_cell_htlc_updates.iter_mut() {
9316                                 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
9317                                         *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
9318                                 }
9319                         }
9320                         // We expect all skimmed fees to be consumed above
9321                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9322                 }
9323                 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
9324                         let mut iter = blinding_pts.into_iter();
9325                         for htlc in pending_outbound_htlcs.iter_mut() {
9326                                 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
9327                         }
9328                         // We expect all blinding points to be consumed above
9329                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9330                 }
9331                 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
9332                         let mut iter = blinding_pts.into_iter();
9333                         for htlc in holding_cell_htlc_updates.iter_mut() {
9334                                 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
9335                                         *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
9336                                 }
9337                         }
9338                         // We expect all blinding points to be consumed above
9339                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9340                 }
9341
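                // Note (descriptive, not upstream): malformed HTLCs had no representation before
                // TLV 43 was added, so they are serialized as `FailHTLC` placeholders with an empty
                // error packet plus this side-table of (htlc_id, failure_code, sha256_of_onion);
                // here we splice them back into the holding cell.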
9342                 if let Some(malformed_htlcs) = malformed_htlcs {
9343                         for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
9344                                 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
9345                                         if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
9346                                                 let matches = *htlc_id == malformed_htlc_id;
9347                                                 if matches { debug_assert!(err_packet.data.is_empty()) }
9348                                                 matches
9349                                         } else { false }
9350                                 }).ok_or(DecodeError::InvalidValue)?;
9351                                 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
9352                                         htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
9353                                 };
9354                                 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
9355                         }
9356                 }
9357
9358                 // If we're restoring this channel for the first time after an upgrade, then we require that the
9359                 // signer be available so that we can immediately populate the current commitment point. Channel
9360                 // restoration will fail if this is not possible.
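                // Note (descriptive, not upstream): commitment numbers count *down* from
                // INITIAL_COMMITMENT_NUMBER, which is why the "next" per-commitment point below is
                // derived at `transaction_number - 1`.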
9361                 let holder_commitment_point = match (cur_holder_commitment_point_opt, next_holder_commitment_point_opt) {
9362                         (Some(current), Some(next)) => HolderCommitmentPoint::Available {
9363                                 transaction_number: cur_holder_commitment_transaction_number, current, next
9364                         },
9365                         (Some(current), _) => HolderCommitmentPoint::Available {
9366                                 transaction_number: cur_holder_commitment_transaction_number, current,
9367                                 next: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx),
9368                         },
9369                         (_, _) => HolderCommitmentPoint::Available {
9370                                 transaction_number: cur_holder_commitment_transaction_number,
9371                                 current: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number, &secp_ctx),
9372                                 next: holder_signer.get_per_commitment_point(cur_holder_commitment_transaction_number - 1, &secp_ctx),
9373                         },
9374                 };
9375
9376                 Ok(Channel {
9377                         context: ChannelContext {
9378                                 user_id,
9379
9380                                 config: config.unwrap(),
9381
9382                                 prev_config: None,
9383
9384                                 // Note that we don't care about serializing handshake limits as we only ever serialize
9385                                 // channel data after the handshake has completed.
9386                                 inbound_handshake_limits_override: None,
9387
9388                                 channel_id,
9389                                 temporary_channel_id,
9390                                 channel_state,
9391                                 announcement_sigs_state: announcement_sigs_state.unwrap(),
9392                                 secp_ctx,
9393                                 channel_value_satoshis,
9394
9395                                 latest_monitor_update_id,
9396
9397                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
9398                                 shutdown_scriptpubkey,
9399                                 destination_script,
9400
9401                                 holder_commitment_point,
9402                                 cur_holder_commitment_transaction_number,
9403                                 cur_counterparty_commitment_transaction_number,
9404                                 value_to_self_msat,
9405
9406                                 holder_max_accepted_htlcs,
9407                                 pending_inbound_htlcs,
9408                                 pending_outbound_htlcs,
9409                                 holding_cell_htlc_updates,
9410
9411                                 resend_order,
9412
9413                                 monitor_pending_channel_ready,
9414                                 monitor_pending_revoke_and_ack,
9415                                 monitor_pending_commitment_signed,
9416                                 monitor_pending_forwards,
9417                                 monitor_pending_failures,
9418                                 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
9419                                 monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or(Vec::new()),
9420
9421                                 signer_pending_commitment_update: false,
9422                                 signer_pending_funding: false,
9423
9424                                 pending_update_fee,
9425                                 holding_cell_update_fee,
9426                                 next_holder_htlc_id,
9427                                 next_counterparty_htlc_id,
9428                                 update_time_counter,
9429                                 feerate_per_kw,
9430
9431                                 #[cfg(debug_assertions)]
9432                                 holder_max_commitment_tx_output: Mutex::new((0, 0)),
9433                                 #[cfg(debug_assertions)]
9434                                 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
9435
9436                                 last_sent_closing_fee: None,
9437                                 pending_counterparty_closing_signed: None,
9438                                 expecting_peer_commitment_signed: false,
9439                                 closing_fee_limits: None,
9440                                 target_closing_feerate_sats_per_kw,
9441
9442                                 funding_tx_confirmed_in,
9443                                 funding_tx_confirmation_height,
9444                                 short_channel_id,
9445                                 channel_creation_height: channel_creation_height.unwrap(),
9446
9447                                 counterparty_dust_limit_satoshis,
9448                                 holder_dust_limit_satoshis,
9449                                 counterparty_max_htlc_value_in_flight_msat,
9450                                 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
9451                                 counterparty_selected_channel_reserve_satoshis,
9452                                 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
9453                                 counterparty_htlc_minimum_msat,
9454                                 holder_htlc_minimum_msat,
9455                                 counterparty_max_accepted_htlcs,
9456                                 minimum_depth,
9457
9458                                 counterparty_forwarding_info,
9459
9460                                 channel_transaction_parameters: channel_parameters,
9461                                 funding_transaction,
9462                                 is_batch_funding,
9463
9464                                 counterparty_cur_commitment_point,
9465                                 counterparty_prev_commitment_point,
9466                                 counterparty_node_id,
9467
9468                                 counterparty_shutdown_scriptpubkey,
9469
9470                                 commitment_secrets,
9471
9472                                 channel_update_status,
9473                                 closing_signed_in_flight: false,
9474
9475                                 announcement_sigs,
9476
9477                                 #[cfg(any(test, fuzzing))]
9478                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
9479                                 #[cfg(any(test, fuzzing))]
9480                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
9481
9482                                 workaround_lnd_bug_4006: None,
9483                                 sent_message_awaiting_response: None,
9484
9485                                 latest_inbound_scid_alias,
9486                                 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases where they're missing
9487                                 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
9488
9489                                 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
9490                                 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
9491
9492                                 #[cfg(any(test, fuzzing))]
9493                                 historical_inbound_htlc_fulfills,
9494
9495                                 channel_type: channel_type.unwrap(),
9496                                 channel_keys_id,
9497
9498                                 local_initiated_shutdown,
9499
9500                                 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
9501                         },
9502                         #[cfg(any(dual_funding, splicing))]
9503                         dual_funding_channel_context: None,
9504                 })
9505         }
9506 }
9507
9508 #[cfg(test)]
9509 mod tests {
9510         use std::cmp;
9511         use bitcoin::blockdata::constants::ChainHash;
9512         use bitcoin::blockdata::script::{ScriptBuf, Builder};
9513         use bitcoin::blockdata::transaction::{Transaction, TxOut};
9514         use bitcoin::blockdata::opcodes;
9515         use bitcoin::network::constants::Network;
9516         use crate::ln::onion_utils::INVALID_ONION_BLINDING;
9517         use crate::ln::types::{PaymentHash, PaymentPreimage};
9518         use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
9519         use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
9520         use crate::ln::channel::InitFeatures;
9521         use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
9522         use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
9523         use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
9524         use crate::ln::msgs;
9525         use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
9526         use crate::ln::script::ShutdownScript;
9527         use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
9528         use crate::chain::BestBlock;
9529         use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
9530         use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
9531         use crate::chain::transaction::OutPoint;
9532         use crate::routing::router::{Path, RouteHop};
9533         use crate::util::config::UserConfig;
9534         use crate::util::errors::APIError;
9535         use crate::util::ser::{ReadableArgs, Writeable};
9536         use crate::util::test_utils;
9537         use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
9538         use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
9539         use bitcoin::secp256k1::ffi::Signature as FFISignature;
9540         use bitcoin::secp256k1::{SecretKey,PublicKey};
9541         use bitcoin::hashes::sha256::Hash as Sha256;
9542         use bitcoin::hashes::Hash;
9543         use bitcoin::hashes::hex::FromHex;
9544         use bitcoin::hash_types::WPubkeyHash;
9545         use bitcoin::blockdata::locktime::absolute::LockTime;
9546         use bitcoin::address::{WitnessProgram, WitnessVersion};
9547         use crate::prelude::*;
9548
9549         #[test]
9550         fn test_channel_state_order() {
9551                 use crate::ln::channel::NegotiatingFundingFlags;
9552                 use crate::ln::channel::AwaitingChannelReadyFlags;
9553                 use crate::ln::channel::ChannelReadyFlags;
9554
9555                 assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
9556                 assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
9557                 assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
9558                 assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
9559         }
9560
9561         struct TestFeeEstimator {
9562                 fee_est: u32
9563         }
9564         impl FeeEstimator for TestFeeEstimator {
9565                 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
9566                         self.fee_est
9567                 }
9568         }
9569
9570         #[test]
9571         fn test_max_funding_satoshis_no_wumbo() {
9572                 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
9573                 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
9574                         "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
9575         }
9576
9577         struct Keys {
9578                 signer: InMemorySigner,
9579         }
9580
9581         impl EntropySource for Keys {
9582                 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
9583         }
9584
9585         impl SignerProvider for Keys {
9586                 type EcdsaSigner = InMemorySigner;
9587                 #[cfg(taproot)]
9588                 type TaprootSigner = InMemorySigner;
9589
9590                 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
9591                         self.signer.channel_keys_id()
9592                 }
9593
9594                 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
9595                         self.signer.clone()
9596                 }
9597
9598                 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
9599
9600                 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
9601                         let secp_ctx = Secp256k1::signing_only();
9602                         let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
9603                         let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
9604                         Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
9605                 }
9606
9607                 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
9608                         let secp_ctx = Secp256k1::signing_only();
9609                         let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
9610                         Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
9611                 }
9612         }
9613
9614         #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
9615         fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
9616                 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
9617         }
9618
9619         #[test]
9620         fn upfront_shutdown_script_incompatibility() {
9621                 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
9622                 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
9623                         &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
9624                 ).unwrap();
9625
9626                 let seed = [42; 32];
9627                 let network = Network::Testnet;
9628                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9629                 keys_provider.expect(OnGetShutdownScriptpubkey {
9630                         returns: non_v0_segwit_shutdown_script.clone(),
9631                 });
9632
9633                 let secp_ctx = Secp256k1::new();
9634                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9635                 let config = UserConfig::default();
9636                 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
9637                         Err(APIError::IncompatibleShutdownScript { script }) => {
9638                                 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
9639                         },
9640                         Err(e) => panic!("Unexpected error: {:?}", e),
9641                         Ok(_) => panic!("Expected error"),
9642                 }
9643         }
9644
9645         // Check that, during channel creation, we use the same feerate in the open channel message
9646         // as we do in the Channel object creation itself.
9647         #[test]
9648         fn test_open_channel_msg_fee() {
9649                 let original_fee = 253;
9650                 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
9651                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
9652                 let secp_ctx = Secp256k1::new();
9653                 let seed = [42; 32];
9654                 let network = Network::Testnet;
9655                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9656
9657                 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9658                 let config = UserConfig::default();
9659                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9660
9661                 // Now change the fee so we can check that the fee in the open_channel message is the
9662                 // same as the old fee.
9663                 fee_est.fee_est = 500;
9664                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9665                 assert_eq!(open_channel_msg.common_fields.commitment_feerate_sat_per_1000_weight, original_fee);
9666         }
9667
9668         #[test]
9669         fn test_holder_vs_counterparty_dust_limit() {
9670                 // Test that when calculating the local and remote commitment transaction fees, the correct
9671                 // dust limits are used.
9672                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9673                 let secp_ctx = Secp256k1::new();
9674                 let seed = [42; 32];
9675                 let network = Network::Testnet;
9676                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9677                 let logger = test_utils::TestLogger::new();
9678                 let best_block = BestBlock::from_network(network);
9679
9680                 // Go through the flow of opening a channel between two nodes, making sure
9681                 // they have different dust limits.
9682
9683                 // Create Node A's channel pointing to Node B's pubkey
9684                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9685                 let config = UserConfig::default();
9686                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9687
9688                 // Create Node B's channel by receiving Node A's open_channel message
9689                 // Make sure A's dust limit is as we expect.
9690                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9691                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9692                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9693
9694                 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
9695                 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
9696                 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
9697                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9698                 node_a_chan.context.holder_dust_limit_satoshis = 1560;
9699
9700                 // Node A --> Node B: funding created
9701                 let output_script = node_a_chan.context.get_funding_redeemscript();
9702                 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9703                         value: 10000000, script_pubkey: output_script.clone(),
9704                 }]};
9705                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9706                 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9707                 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9708
9709                 // Node B --> Node A: funding signed
9710                 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9711                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9712
9713                 // Put some inbound and outbound HTLCs in A's channel.
9714                 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
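                // Spelled out (non-anchor weights per BOLT 3: HTLC-timeout tx 663 WU, HTLC-success
                // tx 703 WU, at 15000 sat/kW, i.e. 15 sat per weight unit):
                //   A (dust limit 1560): 1560 + 663 * 15 = 11505 sat and 1560 + 703 * 15 = 12105 sat,
                //     so an 11092 sat HTLC is dust either way on A's commitment tx.
                //   B (dust limit 546): 546 + 703 * 15 = 11091 sat, so the same HTLC is non-dust on
                //     B's commitment tx.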
9715                 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
9716                         htlc_id: 0,
9717                         amount_msat: htlc_amount_msat,
9718                         payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
9719                         cltv_expiry: 300000000,
9720                         state: InboundHTLCState::Committed,
9721                 });
9722
9723                 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
9724                         htlc_id: 1,
9725                         amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
9726                         payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
9727                         cltv_expiry: 200000000,
9728                         state: OutboundHTLCState::Committed,
9729                         source: HTLCSource::OutboundRoute {
9730                                 path: Path { hops: Vec::new(), blinded_tail: None },
9731                                 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9732                                 first_hop_htlc_msat: 548,
9733                                 payment_id: PaymentId([42; 32]),
9734                         },
9735                         skimmed_fee_msat: None,
9736                         blinding_point: None,
9737                 });
9738
9739                 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
9740                 // the dust limit check.
9741                 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9742                 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9743                 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
9744                 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
9745
9746                 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
9747                 // of the HTLCs are seen to be above the dust limit.
9748                 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9749                 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
9750                 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9751                 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9752                 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
9753         }
9754
9755         #[test]
9756         fn test_timeout_vs_success_htlc_dust_limit() {
9757                 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
9758                 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
9759                 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
9760                 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
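                // In other words, an HTLC is dust on a commitment tx when
                //   amount_sat < dust_limit_sat + weight * feerate_per_kw / 1000,
                // where `weight` is the HTLC-timeout tx weight for HTLCs we offered and the
                // HTLC-success tx weight for HTLCs we received.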
9761                 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
9762                 let secp_ctx = Secp256k1::new();
9763                 let seed = [42; 32];
9764                 let network = Network::Testnet;
9765                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9766
9767                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9768                 let config = UserConfig::default();
9769                 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9770
9771                 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
9772                 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
9773
9774                 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped, this HTLC
9775                 // would be counted as dust when it shouldn't be.
9776                 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
9777                 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9778                 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9779                 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9780
9781                 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9782                 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
9783                 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9784                 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9785                 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9786
9787                 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9788
9789                 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9790                 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
9791                 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9792                 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9793                 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9794
9795                 // If swapped: this HTLC would be counted as dust when it shouldn't be.
9796                 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
9797                 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9798                 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9799                 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9800         }
9801
9802         #[test]
9803         fn channel_reestablish_no_updates() {
9804                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9805                 let logger = test_utils::TestLogger::new();
9806                 let secp_ctx = Secp256k1::new();
9807                 let seed = [42; 32];
9808                 let network = Network::Testnet;
9809                 let best_block = BestBlock::from_network(network);
9810                 let chain_hash = ChainHash::using_genesis_block(network);
9811                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9812
9813                 // Go through the flow of opening a channel between two nodes.
9814
9815                 // Create Node A's channel pointing to Node B's pubkey
9816                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9817                 let config = UserConfig::default();
9818                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9819
9820                 // Create Node B's channel by receiving Node A's open_channel message
9821                 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
9822                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9823                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9824
9825                 // Node B --> Node A: accept channel
9826                 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9827                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9828
9829                 // Node A --> Node B: funding created
9830                 let output_script = node_a_chan.context.get_funding_redeemscript();
9831                 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9832                         value: 10000000, script_pubkey: output_script.clone(),
9833                 }]};
9834                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9835                 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9836                 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9837
9838                 // Node B --> Node A: funding signed
9839                 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9840                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9841
9842                 // Now disconnect the two nodes and check that the commitment point in
9843                 // Node B's channel_reestablish message is sane.
9844                 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9845                 let msg = node_b_chan.get_channel_reestablish(&&logger);
9846                 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9847                 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9848                 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
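                // A freshly funded channel has exactly one (the initial) commitment signed and
                // nothing revoked yet, hence next_commitment_number == 1, next_revocation_number
                // == 0, and an all-zero last per-commitment secret.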
9849
9850                 // Check that the commitment point in Node A's channel_reestablish message
9851                 // is sane.
9852                 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9853                 let msg = node_a_chan.get_channel_reestablish(&&logger);
9854                 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9855                 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9856                 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
9857         }
9858
9859         #[test]
9860         fn test_configured_holder_max_htlc_value_in_flight() {
9861                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9862                 let logger = test_utils::TestLogger::new();
9863                 let secp_ctx = Secp256k1::new();
9864                 let seed = [42; 32];
9865                 let network = Network::Testnet;
9866                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9867                 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9868                 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9869
9870                 let mut config_2_percent = UserConfig::default();
9871                 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
9872                 let mut config_99_percent = UserConfig::default();
9873                 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
9874                 let mut config_0_percent = UserConfig::default();
9875                 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
9876                 let mut config_101_percent = UserConfig::default();
9877                 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
9878
9879                 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
9880                 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9881                 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9882                 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
9883                 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
9884                 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
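                // Concretely: 10_000_000 sat * 1000 = 10_000_000_000 msat, of which 2% is
                // 200_000_000 msat.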
9885
9886                 // Test with the upper bound - 1 of valid values (99%).
9887                 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
9888                 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
9889                 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
9890
9891                 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
9892
9893                 // Test that `InboundV1Channel::new` creates a channel with the correct value for
9894                 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9895                 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9896                 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9897                 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
9898                 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
9899
9900                 // Test with the upper bound - 1 of valid values (99%).
9901                 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9902                 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
9903                 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
9904
9905                 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9906                 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9907                 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
9908                 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
9909                 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
9910
9911                 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
9912                 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
9913                 // than 100.
9914                 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
9915                 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
9916                 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
9917
9918                 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9919                 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9920                 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9921                 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
9922                 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
9923
9924                 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
9925                 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
9926                 // than 100.
9927                 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9928                 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
9929                 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
9930         }
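
        // A minimal sketch (not part of the channel API; the helper name below is
        // hypothetical, for illustration only) of the clamping behavior the
        // assertions above rely on: the configured percentage is clamped to the
        // range [1, 100] before being applied to the channel value.
        #[test]
        fn max_in_flight_percent_clamp_sketch() {
                fn clamped_in_flight_msat(channel_value_msat: u64, configured_percent: u8) -> u64 {
                        // Clamp to [1, 100], then take that percentage of the channel value.
                        let pct = cmp::max(1, cmp::min(100, configured_percent)) as u64;
                        channel_value_msat * pct / 100
                }
                // A configured 0% is clamped up to 1%...
                assert_eq!(clamped_in_flight_msat(10_000_000_000, 0), 100_000_000);
                // ...and a configured 101% is clamped down to 100%.
                assert_eq!(clamped_in_flight_msat(10_000_000_000, 101), 10_000_000_000);
        }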

        #[test]
        fn test_configured_holder_selected_channel_reserve_satoshis() {

                // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
                // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
                test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);

                // Test with valid but unreasonably high channel reserves.
                // The requesting and accepting parties have requested 49%-49% and 60%-30% channel reserves.
                test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
                test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);

                // Test with a calculated channel reserve less than the lower bound,
                // i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
                test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);

                // Test with invalid channel reserves, where the sum of both is greater than or equal
                // to the channel value.
                test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
                test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
        }

        fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
                let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
                let logger = test_utils::TestLogger::new();
                let secp_ctx = Secp256k1::new();
                let seed = [42; 32];
                let network = Network::Testnet;
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
                let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());

                let mut outbound_node_config = UserConfig::default();
                outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
                let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();

                let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
                assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);

                let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
                let mut inbound_node_config = UserConfig::default();
                inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;

                if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
                        let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();

                        let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);

                        assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
                        assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
                } else {
                        // Channel negotiation failed
                        let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
                        assert!(result.is_err());
                }
        }
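
        // A minimal sketch (not part of the channel API; the constant below is an
        // assumption for illustration only) of the proportional reserve arithmetic
        // exercised above: the selected reserve is `channel_value * millionths /
        // 1_000_000`, floored at a minimum (`MIN_THEIR_CHAN_RESERVE_SATOSHIS` in
        // this file).
        #[test]
        fn channel_reserve_proportion_sketch() {
                // Assumed floor, standing in for MIN_THEIR_CHAN_RESERVE_SATOSHIS.
                const ASSUMED_MIN_RESERVE_SATOSHIS: u64 = 1_000;
                fn selected_reserve_sats(channel_value_satoshis: u64, millionths: u32) -> u64 {
                        cmp::max(ASSUMED_MIN_RESERVE_SATOSHIS, channel_value_satoshis * millionths as u64 / 1_000_000)
                }
                // 2% of a 10_000_000 sat channel:
                assert_eq!(selected_reserve_sats(10_000_000, 20_000), 200_000);
                // 0.002% of a 100_000 sat channel falls below the floor:
                assert_eq!(selected_reserve_sats(100_000, 20), ASSUMED_MIN_RESERVE_SATOSHIS);
        }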

        #[test]
        fn channel_update() {
                let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
                let logger = test_utils::TestLogger::new();
                let secp_ctx = Secp256k1::new();
                let seed = [42; 32];
                let network = Network::Testnet;
                let best_block = BestBlock::from_network(network);
                let chain_hash = ChainHash::using_genesis_block(network);
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

                // Create Node A's channel pointing to Node B's pubkey
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
                let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();

                // Create Node B's channel by receiving Node A's open_channel message
                // Make sure A's dust limit is as we expect.
                let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
                let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();

                // Node B --> Node A: accept channel, explicitly setting B's dust limit.
                let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
                accept_channel_msg.common_fields.dust_limit_satoshis = 546;
                node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
                node_a_chan.context.holder_dust_limit_satoshis = 1560;

                // Node A --> Node B: funding created
                let output_script = node_a_chan.context.get_funding_redeemscript();
                let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
                        value: 10000000, script_pubkey: output_script.clone(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
                let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
                let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();

                // Node B --> Node A: funding signed
                let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
                let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };

                // Make sure that receiving a channel update will update the Channel as expected.
                let update = ChannelUpdate {
                        contents: UnsignedChannelUpdate {
                                chain_hash,
                                short_channel_id: 0,
                                timestamp: 0,
                                flags: 0,
                                cltv_expiry_delta: 100,
                                htlc_minimum_msat: 5,
                                htlc_maximum_msat: MAX_VALUE_MSAT,
                                fee_base_msat: 110,
                                fee_proportional_millionths: 11,
                                excess_data: Vec::new(),
                        },
                        signature: Signature::from(unsafe { FFISignature::new() })
                };
                assert!(node_a_chan.channel_update(&update).unwrap());

                // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
                // change our official htlc_minimum_msat.
                assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
                match node_a_chan.context.counterparty_forwarding_info() {
                        Some(info) => {
                                assert_eq!(info.cltv_expiry_delta, 100);
                                assert_eq!(info.fee_base_msat, 110);
                                assert_eq!(info.fee_proportional_millionths, 11);
                        },
                        None => panic!("expected counterparty forwarding info to be Some")
                }

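                // Re-applying the identical update below is a no-op: the bool returned by
                // `channel_update` indicates whether the counterparty's forwarding
                // parameters actually changed, so the second call returns false.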
                assert!(!node_a_chan.channel_update(&update).unwrap());
        }

        #[test]
        fn blinding_point_skimmed_fee_malformed_ser() {
                // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
                // properly.
                let logger = test_utils::TestLogger::new();
                let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
                let secp_ctx = Secp256k1::new();
                let seed = [42; 32];
                let network = Network::Testnet;
                let best_block = BestBlock::from_network(network);
                let keys_provider = test_utils::TestKeysInterface::new(&seed, network);

                let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let config = UserConfig::default();
                let features = channelmanager::provided_init_features(&config);
                let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
                        &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
                ).unwrap();
                let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
                        &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
                        &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
                ).unwrap();
                outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
                let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
                        value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
                }]};
                let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
                let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
                let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
                        Ok((chan, _, _)) => chan,
                        Err((_, e)) => panic!("{}", e),
                };

                let dummy_htlc_source = HTLCSource::OutboundRoute {
                        path: Path {
                                hops: vec![RouteHop {
                                        pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
                                        node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
                                        cltv_expiry_delta: 0, maybe_announced_channel: false,
                                }],
                                blinded_tail: None
                        },
                        session_priv: test_utils::privkey(42),
                        first_hop_htlc_msat: 0,
                        payment_id: PaymentId([42; 32]),
                };
                let dummy_outbound_output = OutboundHTLCOutput {
                        htlc_id: 0,
                        amount_msat: 0,
                        payment_hash: PaymentHash([43; 32]),
                        cltv_expiry: 0,
                        state: OutboundHTLCState::Committed,
                        source: dummy_htlc_source.clone(),
                        skimmed_fee_msat: None,
                        blinding_point: None,
                };
                let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
                for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
                        if idx % 2 == 0 {
                                htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
                        }
                        if idx % 3 == 0 {
                                htlc.skimmed_fee_msat = Some(1);
                        }
                }
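                // The interleaved `Some`/`None` pattern above exercises the
                // (de)serialization of both present and absent blinding points and
                // skimmed fees within a single HTLC list.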
                chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();

                let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
                        amount_msat: 0,
                        cltv_expiry: 0,
                        payment_hash: PaymentHash([43; 32]),
                        source: dummy_htlc_source.clone(),
                        onion_routing_packet: msgs::OnionPacket {
                                version: 0,
                                public_key: Ok(test_utils::pubkey(1)),
                                hop_data: [0; 20*65],
                                hmac: [0; 32]
                        },
                        skimmed_fee_msat: None,
                        blinding_point: None,
                };
                let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
                        payment_preimage: PaymentPreimage([42; 32]),
                        htlc_id: 0,
                };
                let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
                        htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
                };
                let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
                        htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
                };
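                // Cycle through all five holding-cell update variants (plain add, claim,
                // add with a blinding point and skimmed fee, fail-malformed, fail) so that
                // every arm of the holding-cell (de)serialization logic is covered.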
                let mut holding_cell_htlc_updates = Vec::with_capacity(12);
                for i in 0..12 {
                        if i % 5 == 0 {
                                holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
                        } else if i % 5 == 1 {
                                holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
                        } else if i % 5 == 2 {
                                let mut dummy_add = dummy_holding_cell_add_htlc.clone();
                                if let HTLCUpdateAwaitingACK::AddHTLC {
                                        ref mut blinding_point, ref mut skimmed_fee_msat, ..
                                } = &mut dummy_add {
                                        *blinding_point = Some(test_utils::pubkey(42 + i));
                                        *skimmed_fee_msat = Some(42);
                                } else { panic!() }
                                holding_cell_htlc_updates.push(dummy_add);
                        } else if i % 5 == 3 {
                                holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
                        } else {
                                holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
                        }
                }
                chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();

                // Encode and decode the channel and ensure that the HTLCs within are the same.
                let encoded_chan = chan.encode();
                let mut s = crate::io::Cursor::new(&encoded_chan);
                let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
                let features = channelmanager::provided_channel_type_features(&config);
                let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
                assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
                assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
        }

        #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
        #[test]
        fn outbound_commitment_test() {
                use bitcoin::sighash;
                use bitcoin::consensus::encode::serialize;
                use bitcoin::sighash::EcdsaSighashType;
                use bitcoin::hashes::hex::FromHex;
                use bitcoin::hash_types::Txid;
                use bitcoin::secp256k1::Message;
                use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
                use crate::ln::PaymentPreimage;
                use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
                use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
                use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
                use crate::util::logger::Logger;
                use crate::sync::Arc;
                use core::str::FromStr;
                use hex::DisplayHex;

                // Test vectors from BOLT 3 Appendices C and F (anchors):
                let feeest = TestFeeEstimator{fee_est: 15000};
                let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
                let secp_ctx = Secp256k1::new();

                let mut signer = InMemorySigner::new(
                        &secp_ctx,
                        SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
                        SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
                        SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
                        SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
                        SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),

                        // These aren't set in the test vectors:
                        [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
                        10_000_000,
                        [0; 32],
                        [0; 32],
                );

                assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
                                <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
                let keys_provider = Keys { signer: signer.clone() };

                let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
                let mut config = UserConfig::default();
                config.channel_handshake_config.announced_channel = false;
                let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
                chan.context.holder_dust_limit_satoshis = 546;
                chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Normally filled in by accept_channel

                let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };

                let counterparty_pubkeys = ChannelPublicKeys {
                        funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
                        revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
                        payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
                        delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
                        htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
                };
                chan.context.channel_transaction_parameters.counterparty_parameters = Some(
                        CounterpartyChannelTransactionParameters {
                                pubkeys: counterparty_pubkeys.clone(),
                                selected_contest_delay: 144
                        });
                chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
                signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);

                assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
                           <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

                assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
                           <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);

                assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
                           <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);

                // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
                // derived from a commitment_seed, so instead we copy it here and call
                // build_commitment_transaction.
                let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
                let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
                let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
                let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
                let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);

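                // Each `test_commitment!` / `test_commitment_with_anchors!` invocation below
                // rebuilds the holder commitment transaction for the current channel state,
                // verifies the hard-coded counterparty signature against it, checks the
                // holder's own signature and the fully-signed transaction against the BOLT 3
                // vectors, and then repeats the process for each HTLC transaction.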
                macro_rules! test_commitment {
                        ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
                                chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
                                test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
                        };
                }

                macro_rules! test_commitment_with_anchors {
                        ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
                                chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
                                test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
                        };
                }

                macro_rules! test_commitment_common {
                        ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
                                $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
                        } ) => { {
                                let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
                                        let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);

                                        let htlcs = commitment_stats.htlcs_included.drain(..)
                                                .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
                                                .collect();
                                        (commitment_stats.tx, htlcs)
                                };
                                let trusted_tx = commitment_tx.trust();
                                let unsigned_tx = trusted_tx.built_transaction();
                                let redeemscript = chan.context.get_funding_redeemscript();
                                let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
                                let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
                                log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
                                assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");

                                let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
                                per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
                                let mut counterparty_htlc_sigs = Vec::new();
                                counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
                                $({
                                        let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
                                        per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
                                        counterparty_htlc_sigs.push(remote_signature);
                                })*
                                assert_eq!(htlcs.len(), per_htlc.len());

                                let holder_commitment_tx = HolderCommitmentTransaction::new(
                                        commitment_tx.clone(),
                                        counterparty_signature,
                                        counterparty_htlc_sigs,
                                        &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
                                        chan.context.counterparty_funding_pubkey()
                                );
                                let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
                                assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");

                                let funding_redeemscript = chan.context.get_funding_redeemscript();
                                let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
                                assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");

                                // ((htlc, counterparty_sig), (index, holder_sig))
                                let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();

                                $({
                                        log_trace!(logger, "verifying htlc {}", $htlc_idx);
                                        let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();

                                        let ref htlc = htlcs[$htlc_idx];
                                        let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
                                                chan.context.get_counterparty_selected_contest_delay().unwrap(),
                                                &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
                                        let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
                                        let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
                                        let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
                                        assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");

                                        let mut preimage: Option<PaymentPreimage> = None;
                                        if !htlc.offered {
                                                for i in 0..5 {
                                                        let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
                                                        if out == htlc.payment_hash {
                                                                preimage = Some(PaymentPreimage([i; 32]));
                                                        }
                                                }

                                                assert!(preimage.is_some());
                                        }

                                        let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
                                        let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
                                                channel_derivation_parameters: ChannelDerivationParameters {
                                                        value_satoshis: chan.context.channel_value_satoshis,
                                                        keys_id: chan.context.channel_keys_id,
                                                        transaction_parameters: chan.context.channel_transaction_parameters.clone(),
                                                },
                                                commitment_txid: trusted_tx.txid(),
                                                per_commitment_number: trusted_tx.commitment_number(),
                                                per_commitment_point: trusted_tx.per_commitment_point(),
                                                feerate_per_kw: trusted_tx.feerate_per_kw(),
                                                htlc: htlc.clone(),
                                                preimage: preimage.clone(),
                                                counterparty_sig: *htlc_counterparty_sig,
                                        }, &secp_ctx).unwrap();
                                        let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
                                        assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");

                                        let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
                                        assert_eq!(signature, htlc_holder_sig, "htlc sig");
                                        let trusted_tx = holder_commitment_tx.trust();
                                        htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
                                        log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
                                        assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
                                })*
                                assert!(htlc_counterparty_sig_iter.next().is_none());
                        } }
                }
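
                // The macro arguments are, in order: the counterparty's commitment signature,
                // the holder's commitment signature, and the expected fully-signed commitment
                // transaction, followed by `{ index, counterparty HTLC sig, holder HTLC sig,
                // expected HTLC tx }` entries, all hex-encoded BOLT 3 test vectors.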

                // anchors: simple commitment tx with no HTLCs and single anchor
                test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
                                                 "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
                                                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

                // simple commitment tx with no HTLCs
                chan.context.value_to_self_msat = 7000000000;

                test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
                                                 "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
                                                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

                // anchors: simple commitment tx with no HTLCs
                test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
                                                 "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
                                                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});

                chan.context.pending_inbound_htlcs.push({
                        let mut out = InboundHTLCOutput{
                                htlc_id: 0,
                                amount_msat: 1000000,
                                cltv_expiry: 500,
                                payment_hash: PaymentHash([0; 32]),
                                state: InboundHTLCState::Committed,
                        };
                        out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
                        out
                });
                chan.context.pending_inbound_htlcs.push({
                        let mut out = InboundHTLCOutput{
                                htlc_id: 1,
                                amount_msat: 2000000,
                                cltv_expiry: 501,
                                payment_hash: PaymentHash([0; 32]),
                                state: InboundHTLCState::Committed,
                        };
                        out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
                        out
                });
                chan.context.pending_outbound_htlcs.push({
                        let mut out = OutboundHTLCOutput{
                                htlc_id: 2,
                                amount_msat: 2000000,
                                cltv_expiry: 502,
                                payment_hash: PaymentHash([0; 32]),
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
                                skimmed_fee_msat: None,
                                blinding_point: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
                        out
                });
                chan.context.pending_outbound_htlcs.push({
                        let mut out = OutboundHTLCOutput{
                                htlc_id: 3,
                                amount_msat: 3000000,
                                cltv_expiry: 503,
                                payment_hash: PaymentHash([0; 32]),
                                state: OutboundHTLCState::Committed,
                                source: HTLCSource::dummy(),
                                skimmed_fee_msat: None,
                                blinding_point: None,
                        };
                        out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
                        out
                });
                chan.context.pending_inbound_htlcs.push({
                        let mut out = InboundHTLCOutput{
                                htlc_id: 4,
                                amount_msat: 4000000,
                                cltv_expiry: 504,
                                payment_hash: PaymentHash([0; 32]),
                                state: InboundHTLCState::Committed,
                        };
                        out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
                        out
                });
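
                // The five HTLCs above match the HTLC set used throughout the BOLT 3
                // Appendix C vectors; their payment hashes commit to the preimages
                // [0x00; 32] through [0x04; 32], which is what the preimage search in
                // `test_commitment_common!` relies on.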

                // commitment tx with all five HTLCs untrimmed (minimum feerate)
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 0;

                test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
                                 "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
                                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

                                  { 0,
                                  "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
                                  "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
                                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

                                  { 1,
                                  "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
                                  "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
                                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

                                  { 2,
                                  "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
                                  "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
                                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },

                                  { 3,
                                  "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
                                  "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
                                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },

                                  { 4,
                                  "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
                                  "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
                                  "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
                } );

                // commitment tx with seven outputs untrimmed (maximum feerate)
                chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
                chan.context.feerate_per_kw = 647;

                test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
                                 "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
                                 "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {

                                  { 0,
                                  "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
                                  "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
                                  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },

                                  { 1,
                                  "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
                                  "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
                                  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },

                                  { 2,
                                  "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
                                  "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
                                  "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10524
10525                                   { 3,
10526                                   "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
10527                                   "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
10528                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10529
10530                                   { 4,
10531                                   "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
10532                                   "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
10533                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10534                 } );
10535
10536                 // commitment tx with six outputs untrimmed (minimum feerate)
10537                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10538                 chan.context.feerate_per_kw = 648;
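                // Sketch, same check one sat/kw higher (success weight 703 assumed):
                //   1000 - (648 * 703 / 1000) = 1000 - 455 = 545 < 546,
                // so the 1000 sat received HTLC is trimmed and the commitment drops
                // from seven outputs to six.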
10539
10540                 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
10541                                  "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
10542                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10543
10544                                   { 0,
10545                                   "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
10546                                   "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
10547                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10548
10549                                   { 1,
10550                                   "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
10551                                   "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
10552                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10553
10554                                   { 2,
10555                                   "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
10556                                   "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
10557                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10558
10559                                   { 3,
10560                                   "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
10561                                   "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
10562                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10563                 } );
10564
10565                 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
10566                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10567                 chan.context.feerate_per_kw = 645;
10568                 chan.context.holder_dust_limit_satoshis = 1001;
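                // Sketch: with zero-fee-HTLC anchors the second-stage HTLC txs pay no
                // fee, so an HTLC is trimmed exactly when its amount is below the dust
                // limit; here 1000 < 1001 trims the 1000 sat HTLC.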
10569
10570                 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
10571                                  "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
10572                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10573
10574                                   { 0,
10575                                   "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
10576                                   "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
10577                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
10578
10579                                   { 1,
10580                                   "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
10581                                   "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
10582                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10583
10584                                   { 2,
10585                                   "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
10586                                   "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
10587                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
10588
10589                                   { 3,
10590                                   "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
10591                                   "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
10592                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10593                 } );
10594
10595                 // commitment tx with six outputs untrimmed (maximum feerate)
10596                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10597                 chan.context.feerate_per_kw = 2069;
10598                 chan.context.holder_dust_limit_satoshis = 546;
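                // Sketch (assuming success weight 703): the 2000 sat received HTLC is
                // still whole at 2069 sat/kw:
                //   2000 - (2069 * 703 / 1000) = 2000 - 1454 = 546, exactly the dust limit.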
10599
10600                 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
10601                                  "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
10602                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10603
10604                                   { 0,
10605                                   "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
10606                                   "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
10607                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10608
10609                                   { 1,
10610                                   "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
10611                                   "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
10612                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10613
10614                                   { 2,
10615                                   "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
10616                                   "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
10617                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10618
10619                                   { 3,
10620                                   "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
10621                                   "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
10622                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10623                 } );
10624
10625                 // commitment tx with five outputs untrimmed (minimum feerate)
10626                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10627                 chan.context.feerate_per_kw = 2070;
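                // Sketch: one sat/kw more trims the 2000 sat received HTLC:
                //   2000 - (2070 * 703 / 1000) = 2000 - 1455 = 545 < 546 => five outputs.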
10628
10629                 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
10630                                  "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
10631                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10632
10633                                   { 0,
10634                                   "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
10635                                   "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
10636                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10637
10638                                   { 1,
10639                                   "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
10640                                   "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
10641                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10642
10643                                   { 2,
10644                                   "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
10645                                   "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
10646                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10647                 } );
10648
10649                 // commitment tx with five outputs untrimmed (maximum feerate)
10650                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10651                 chan.context.feerate_per_kw = 2194;
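                // Sketch (assuming the non-anchors HTLC-timeout weight of 663): the
                // 2000 sat *offered* HTLC survives at 2194 sat/kw:
                //   2000 - (2194 * 663 / 1000) = 2000 - 1454 = 546.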
10652
10653                 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
10654                                  "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
10655                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10656
10657                                   { 0,
10658                                   "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
10659                                   "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
10660                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10661
10662                                   { 1,
10663                                   "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
10664                                   "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
10665                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10666
10667                                   { 2,
10668                                   "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
10669                                   "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
10670                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10671                 } );
10672
10673                 // commitment tx with four outputs untrimmed (minimum feerate)
10674                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10675                 chan.context.feerate_per_kw = 2195;
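                // Sketch: at 2195 sat/kw the 2000 sat offered HTLC is trimmed:
                //   2000 - (2195 * 663 / 1000) = 2000 - 1455 = 545 < 546 => four outputs.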
10676
10677                 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
10678                                  "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
10679                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10680
10681                                   { 0,
10682                                   "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
10683                                   "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
10684                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10685
10686                                   { 1,
10687                                   "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
10688                                   "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
10689                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10690                 } );
10691
10692                 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
10693                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10694                 chan.context.feerate_per_kw = 2185;
10695                 chan.context.holder_dust_limit_satoshis = 2001;
10696                 let cached_channel_type = chan.context.channel_type.clone();
10697                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
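                // Sketch: with a 2001 sat dust limit and zero-fee HTLC txs, every HTLC
                // of 2000 sat or less is trimmed; only the 3000 and 4000 sat HTLCs keep
                // outputs alongside to_local, to_remote and the two anchors.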
10698
10699                 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
10700                                  "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
10701                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10702
10703                                   { 0,
10704                                   "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
10705                                   "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
10706                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
10707
10708                                   { 1,
10709                                   "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
10710                                   "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
10711                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10712                 } );
10713
10714                 // commitment tx with four outputs untrimmed (maximum feerate)
10715                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10716                 chan.context.feerate_per_kw = 3702;
10717                 chan.context.holder_dust_limit_satoshis = 546;
10718                 chan.context.channel_type = cached_channel_type.clone();
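                // Sketch (timeout weight 663 assumed): the 3000 sat offered HTLC holds
                // on at 3702 sat/kw:
                //   3000 - (3702 * 663 / 1000) = 3000 - 2454 = 546.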
10719
10720                 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
10721                                  "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
10722                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10723
10724                                   { 0,
10725                                   "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
10726                                   "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
10727                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10728
10729                                   { 1,
10730                                   "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
10731                                   "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
10732                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10733                 } );
10734
10735                 // commitment tx with three outputs untrimmed (minimum feerate)
10736                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10737                 chan.context.feerate_per_kw = 3703;
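                // Sketch: 3703 sat/kw trims the 3000 sat offered HTLC:
                //   3000 - (3703 * 663 / 1000) = 3000 - 2455 = 545 < 546 => three outputs.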
10738
10739                 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
10740                                  "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
10741                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10742
10743                                   { 0,
10744                                   "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
10745                                   "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
10746                                   "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10747                 } );
10748
10749                 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
10750                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10751                 chan.context.feerate_per_kw = 3687;
10752                 chan.context.holder_dust_limit_satoshis = 3001;
10753                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
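                // Sketch: with zero-fee HTLC txs a 3001 sat dust limit trims everything
                // up to and including the 3000 sat HTLC, leaving only the 4000 sat HTLC
                // plus the balance and anchor outputs.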
10754
10755                 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
10756                                  "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
10757                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10758
10759                                   { 0,
10760                                   "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
10761                                   "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
10762                                   "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10763                 } );
10764
10765                 // commitment tx with three outputs untrimmed (maximum feerate)
10766                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10767                 chan.context.feerate_per_kw = 4914;
10768                 chan.context.holder_dust_limit_satoshis = 546;
10769                 chan.context.channel_type = cached_channel_type.clone();
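                // Sketch (success weight 703 assumed): the last HTLC, 4000 sat received,
                // is kept at 4914 sat/kw:
                //   4000 - (4914 * 703 / 1000) = 4000 - 3454 = 546.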
10770
10771                 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
10772                                  "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
10773                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10774
10775                                   { 0,
10776                                   "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
10777                                   "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
10778                                   "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10779                 } );
10780
10781                 // commitment tx with two outputs untrimmed (minimum feerate)
10782                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10783                 chan.context.feerate_per_kw = 4915;
10784                 chan.context.holder_dust_limit_satoshis = 546;
10785
10786                 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
10787                                  "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
10788                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10789
10790                 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
10791                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10792                 chan.context.feerate_per_kw = 4894;
10793                 chan.context.holder_dust_limit_satoshis = 4001;
10794                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10795
10796                 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
10797                                  "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
10798                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10799
10800                 // commitment tx with two outputs untrimmed (maximum feerate)
10801                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10802                 chan.context.feerate_per_kw = 9651180;
10803                 chan.context.holder_dust_limit_satoshis = 546;
10804                 chan.context.channel_type = cached_channel_type.clone();
10805
10806                 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
10807                                  "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
10808                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10809
10810                 // commitment tx with one output untrimmed (minimum feerate)
10811                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10812                 chan.context.feerate_per_kw = 9651181;
10813
10814                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10815                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10816                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10817
10818                 // anchors: commitment tx with one output untrimmed (minimum dust limit)
10819                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10820                 chan.context.feerate_per_kw = 6216010;
10821                 chan.context.holder_dust_limit_satoshis = 4001;
10822                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10823
10824                 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
10825                                  "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
10826                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10827
10828                 // commitment tx with fee greater than funder amount
10829                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10830                 chan.context.feerate_per_kw = 9651936;
10831                 chan.context.holder_dust_limit_satoshis = 546;
10832                 chan.context.channel_type = cached_channel_type;
10833
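                      // With the feerate pushed beyond what the funder can afford, the funder's output is
                      // entirely consumed by fees, so the expected transaction (and both signatures) match
                      // the one-output case above.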
10834                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10835                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10836                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10837
10838                 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
10839                 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
10840                 chan.context.feerate_per_kw = 253;
10841                 chan.context.pending_inbound_htlcs.clear();
10842                 chan.context.pending_inbound_htlcs.push({
10843                         let mut out = InboundHTLCOutput{
10844                                 htlc_id: 1,
10845                                 amount_msat: 2000000,
10846                                 cltv_expiry: 501,
10847                                 payment_hash: PaymentHash([0; 32]),
10848                                 state: InboundHTLCState::Committed,
10849                         };
10850                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
10851                         out
10852                 });
10853                 chan.context.pending_outbound_htlcs.clear();
10854                 chan.context.pending_outbound_htlcs.push({
10855                         let mut out = OutboundHTLCOutput{
10856                                 htlc_id: 6,
10857                                 amount_msat: 5000001,
10858                                 cltv_expiry: 506,
10859                                 payment_hash: PaymentHash([0; 32]),
10860                                 state: OutboundHTLCState::Committed,
10861                                 source: HTLCSource::dummy(),
10862                                 skimmed_fee_msat: None,
10863                                 blinding_point: None,
10864                         };
10865                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
10866                         out
10867                 });
10868                 chan.context.pending_outbound_htlcs.push({
10869                         let mut out = OutboundHTLCOutput{
10870                                 htlc_id: 5,
10871                                 amount_msat: 5000000,
10872                                 cltv_expiry: 505,
10873                                 payment_hash: PaymentHash([0; 32]),
10874                                 state: OutboundHTLCState::Committed,
10875                                 source: HTLCSource::dummy(),
10876                                 skimmed_fee_msat: None,
10877                                 blinding_point: None,
10878                         };
10879                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
10880                         out
10881                 });
10882
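                      // The two offered HTLCs above share a payment preimage, and their amounts round to
                      // the same satoshi value, so they produce byte-identical commitment outputs; per
                      // BOLT 3, such outputs are ordered by CLTV expiry, which this vector exercises.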
10883                 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
10884                                  "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
10885                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10886
10887                                   { 0,
10888                                   "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
10889                                   "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
10890                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10891                                   { 1,
10892                                   "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
10893                                   "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
10894                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
10895                                   { 2,
10896                                   "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
10897                                   "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
10898                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
10899                 } );
10900
10901                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10902                 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
10903                                  "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
10904                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10905
10906                                   { 0,
10907                                   "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
10908                                   "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
10909                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10910                                   { 1,
10911                                   "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
10912                                   "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
10913                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
10914                                   { 2,
10915                                   "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
10916                                   "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
10917                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
10918                 } );
10919         }
10920
10921         #[test]
10922         fn test_per_commitment_secret_gen() {
10923                 // Test vectors from BOLT 3 Appendix D:
10924
10925                 let mut seed = [0; 32];
10926                 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
10927                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10928                            <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
10929
10930                 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
10931                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10932                            <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
10933
10934                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
10935                            <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
10936
10937                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
10938                            <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
10939
10940                 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
10941                 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
10942                            <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
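
                      // A minimal reference sketch of the derivation these vectors exercise (assuming the
                      // standard BOLT 3 construction): walk the 48 bits of the index from most to least
                      // significant and, for every set bit, flip that bit in the running value and replace
                      // the value with its SHA256 hash.
                      let reference_derive = |commitment_seed: &[u8; 32], idx: u64| -> [u8; 32] {
                              let mut res = *commitment_seed;
                              for i in 0..48usize {
                                      let bitpos = 47 - i;
                                      if idx & (1 << bitpos) != 0 {
                                              // Flip bit `bitpos` of the running value, then hash it.
                                              res[bitpos / 8] ^= 1 << (bitpos & 7);
                                              res = Sha256::hash(&res).to_byte_array();
                                      }
                              }
                              res
                      };
                      assert_eq!(reference_derive(&seed, 1), chan_utils::build_commitment_secret(&seed, 1));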
10943         }
10944
10945         #[test]
10946         fn test_key_derivation() {
10947                 // Test vectors from BOLT 3 Appendix E:
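                      // Per BOLT 3, each key is derived as pubkey = basepoint + SHA256(per_commitment_point || basepoint) * G
                      // (private keys add the same tweak to the base secret), while the revocation key
                      // mixes both parties' contributions:
                      //   revocationpubkey = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
                      //                    + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)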
10948                 let secp_ctx = Secp256k1::new();
10949
10950                 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
10951                 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
10952
10953                 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
10954                 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
10955
10956                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
10957                 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
10958
10959                 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
10960                                 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
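
                      // As a cross-check (a small sketch relying on the BOLT 3 Appendix E `localpubkey`
                      // vector), the public key of the derived private key should match the basepoint
                      // tweaked by SHA256(per_commitment_point || basepoint).
                      let derived_privkey = chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret);
                      assert_eq!(PublicKey::from_secret_key(&secp_ctx, &derived_privkey).serialize()[..],
                                      <Vec<u8>>::from_hex("0235f2dbfaa89b57ec7b055afe29849ef7ddfeb1cefdb9ebdc43f5494984db29e5").unwrap()[..]);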
10961
10962                 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
10963                                 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
10964
10965                 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
10966                                 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
10967         }
10968
10969         #[test]
10970         fn test_zero_conf_channel_type_support() {
10971                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10972                 let secp_ctx = Secp256k1::new();
10973                 let seed = [42; 32];
10974                 let network = Network::Testnet;
10975                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
10976                 let logger = test_utils::TestLogger::new();
10977
10978                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
10979                 let config = UserConfig::default();
10980                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10981                         node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
10982
10983                 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
10984                 channel_type_features.set_zero_conf_required();
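                      // Requiring `option_zeroconf` in the explicit channel type asks the acceptor to
                      // treat the channel as usable before the funding transaction confirms; the inbound
                      // side below should accept such a request.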
10985
10986                 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
10987                 open_channel_msg.common_fields.channel_type = Some(channel_type_features);
10988                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
10989                 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10990                         node_b_node_id, &channelmanager::provided_channel_type_features(&config),
10991                         &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
10992                 assert!(res.is_ok());
10993         }
10994
10995         #[test]
10996         fn test_supports_anchors_zero_htlc_tx_fee() {
10997                 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it becomes
10998                 // the resulting `channel_type`.
10999                 let secp_ctx = Secp256k1::new();
11000                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
11001                 let network = Network::Testnet;
11002                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
11003                 let logger = test_utils::TestLogger::new();
11004
11005                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
11006                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
11007
11008                 let mut config = UserConfig::default();
11009                 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
11010
11011                 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`;
11012                 // both sides need to signal it.
11013                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
11014                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
11015                         &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
11016                         &config, 0, 42, None
11017                 ).unwrap();
11018                 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
11019
11020                 let mut expected_channel_type = ChannelTypeFeatures::empty();
11021                 expected_channel_type.set_static_remote_key_required();
11022                 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
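                      // Negotiating `anchors_zero_fee_htlc_tx` implies `static_remote_key`, so the
                      // resulting channel type is expected to carry both required bits.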
11023
11024                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
11025                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
11026                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
11027                         None
11028                 ).unwrap();
11029
11030                 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
11031                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
11032                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
11033                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
11034                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
11035                 ).unwrap();
11036
11037                 assert_eq!(channel_a.context.channel_type, expected_channel_type);
11038                 assert_eq!(channel_b.context.channel_type, expected_channel_type);
11039         }
11040
11041         #[test]
11042         fn test_rejects_implicit_simple_anchors() {
11043                 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
11044                 // each side's `InitFeatures`, it is rejected.
11045                 let secp_ctx = Secp256k1::new();
11046                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
11047                 let network = Network::Testnet;
11048                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
11049                 let logger = test_utils::TestLogger::new();
11050
11051                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
11052                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
11053
11054                 let config = UserConfig::default();
11055
11056                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
11057                 let static_remote_key_required: u64 = 1 << 12;
11058                 let simple_anchors_required: u64 = 1 << 20;
11059                 let raw_init_features = static_remote_key_required | simple_anchors_required;
11060                 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
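                      // Per BOLT 9, bit 12 is the required bit for `option_static_remote_key` and bit 20
                      // the required bit for the legacy `option_anchors`; the implicit negotiation below
                      // operates on the intersection of these advertised features.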
11061
11062                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
11063                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
11064                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
11065                         None
11066                 ).unwrap();
11067
11068                 // Set `channel_type` to `None` to force the implicit feature negotiation.
11069                 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
11070                 open_channel_msg.common_fields.channel_type = None;
11071
11072                 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
11073                 // `static_remote_key`, B will fail the channel.
11074                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
11075                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
11076                         &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
11077                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
11078                 );
11079                 assert!(channel_b.is_err());
11080         }
11081
11082         #[test]
11083         fn test_rejects_simple_anchors_channel_type() {
11084                 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
11085                 // it is rejected.
11086                 let secp_ctx = Secp256k1::new();
11087                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
11088                 let network = Network::Testnet;
11089                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
11090                 let logger = test_utils::TestLogger::new();
11091
11092                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
11093                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
11094
11095                 let config = UserConfig::default();
11096
11097                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
11098                 let static_remote_key_required: u64 = 1 << 12;
11099                 let simple_anchors_required: u64 = 1 << 20;
11100                 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
11101                 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
11102                 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
11103                 assert!(!simple_anchors_init.requires_unknown_bits());
11104                 assert!(!simple_anchors_channel_type.requires_unknown_bits());
11105
11106                 // First, we'll try to open a channel between A and B where A requests a channel type for
11107                 // the original `option_anchors` feature (non-zero-fee HTLC transactions). This should be
11108                 // rejected by B as it's not supported by LDK.
11109                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
11110                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
11111                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
11112                         None
11113                 ).unwrap();
11114
11115                 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
11116                 open_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
11117
11118                 let res = InboundV1Channel::<&TestKeysInterface>::new(
11119                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
11120                         &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
11121                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
11122                 );
11123                 assert!(res.is_err());
11124
11125                 // Then, we'll try to open another channel where A requests a channel type for
11126                 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
11127                 // original `option_anchors` feature, which should be rejected by A as it's not supported by
11128                 // LDK.
11129                 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
11130                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
11131                         10000000, 100000, 42, &config, 0, 42, None
11132                 ).unwrap();
11133
11134                 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
11135
11136                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
11137                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
11138                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
11139                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
11140                 ).unwrap();
11141
11142                 let mut accept_channel_msg = channel_b.get_accept_channel_message();
11143                 accept_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
11144
11145                 let res = channel_a.accept_channel(
11146                         &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
11147                 );
11148                 assert!(res.is_err());
11149         }
11150
11151         #[test]
11152         fn test_waiting_for_batch() {
11153                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
11154                 let logger = test_utils::TestLogger::new();
11155                 let secp_ctx = Secp256k1::new();
11156                 let seed = [42; 32];
11157                 let network = Network::Testnet;
11158                 let best_block = BestBlock::from_network(network);
11159                 let chain_hash = ChainHash::using_genesis_block(network);
11160                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
11161
11162                 let mut config = UserConfig::default();
11163                 // Set trust_own_funding_0conf to verify that, even so, we don't send channel_ready for a
11164                 // channel in a batch before all channels in the batch are ready.
11165                 config.channel_handshake_limits.trust_own_funding_0conf = true;
11166
11167                 // Create a channel from node a to node b that will be part of batch funding.
11168                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
11169                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
11170                         &feeest,
11171                         &&keys_provider,
11172                         &&keys_provider,
11173                         node_b_node_id,
11174                         &channelmanager::provided_init_features(&config),
11175                         10000000,
11176                         100000,
11177                         42,
11178                         &config,
11179                         0,
11180                         42,
11181                         None
11182                 ).unwrap();
11183
11184                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
11185                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
11186                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
11187                         &feeest,
11188                         &&keys_provider,
11189                         &&keys_provider,
11190                         node_b_node_id,
11191                         &channelmanager::provided_channel_type_features(&config),
11192                         &channelmanager::provided_init_features(&config),
11193                         &open_channel_msg,
11194                         7,
11195                         &config,
11196                         0,
11197                         &&logger,
11198                         true,  // Allow node b to send a 0conf channel_ready.
11199                 ).unwrap();
11200
11201                 let accept_channel_msg = node_b_chan.accept_inbound_channel();
11202                 node_a_chan.accept_channel(
11203                         &accept_channel_msg,
11204                         &config.channel_handshake_limits,
11205                         &channelmanager::provided_init_features(&config),
11206                 ).unwrap();
11207
11208                 // Fund the channel with a batch funding transaction.
11209                 let output_script = node_a_chan.context.get_funding_redeemscript();
11210                 let tx = Transaction {
11211                         version: 1,
11212                         lock_time: LockTime::ZERO,
11213                         input: Vec::new(),
11214                         output: vec![
11215                                 TxOut {
11216                                         value: 10000000, script_pubkey: output_script.clone(),
11217                                 },
11218                                 TxOut {
11219                                         value: 10000000, script_pubkey: Builder::new().into_script(),
11220                                 },
11221                         ]};
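                      // The second output stands in for another channel's funding output, making this a
                      // batch funding transaction shared across channels.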
11222                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
11223                 let funding_created_msg = node_a_chan.get_funding_created(
11224                         tx.clone(), funding_outpoint, true, &&logger,
11225                 ).map_err(|_| ()).unwrap();
11226                 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
11227                         &funding_created_msg.unwrap(),
11228                         best_block,
11229                         &&keys_provider,
11230                         &&logger,
11231                 ).map_err(|_| ()).unwrap();
11232                 let node_b_updates = node_b_chan.monitor_updating_restored(
11233                         &&logger,
11234                         &&keys_provider,
11235                         chain_hash,
11236                         &config,
11237                         0,
11238                 );
11239
11240                 // Receive funding_signed; the channel is configured to hold off on sending channel_ready
11241                 // and broadcasting the funding transaction until the whole batch is ready.
11242                 let res = node_a_chan.funding_signed(
11243                         &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
11244                 );
11245                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
11246                 let node_a_updates = node_a_chan.monitor_updating_restored(
11247                         &&logger,
11248                         &&keys_provider,
11249                         chain_hash,
11250                         &config,
11251                         0,
11252                 );
11253                 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set, as
11254                 // broadcasting the shared funding transaction depends on all channels in the batch being ready.
11255                 assert!(node_a_updates.channel_ready.is_none());
11256                 assert!(node_a_updates.funding_broadcastable.is_none());
11257                 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
11258
11259                 // It is possible to receive a 0conf channel_ready from the remote node.
11260                 node_a_chan.channel_ready(
11261                         &node_b_updates.channel_ready.unwrap(),
11262                         &&keys_provider,
11263                         chain_hash,
11264                         &config,
11265                         &best_block,
11266                         &&logger,
11267                 ).unwrap();
11268                 assert_eq!(
11269                         node_a_chan.context.channel_state,
11270                         ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
11271                 );
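                      // The AwaitingChannelReady flags form a bitset, so THEIR_CHANNEL_READY is recorded
                      // even while WAITING_FOR_BATCH remains set.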
11272
11273                 // The WAITING_FOR_BATCH flag is only cleared when the ChannelManager calls set_batch_ready.
11274                 node_a_chan.set_batch_ready();
11275                 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
11276                 assert!(node_a_chan.check_get_channel_ready(0).is_some());
11277         }
11278 }