Correct indentation in `get_pending_htlc_stats`
[rust-lightning] / lightning / src / ln / channel.rs
// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey,SecretKey};
use bitcoin::secp256k1::{Secp256k1,ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::io;
use crate::prelude::*;
use core::{cmp,mem,fmt};
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};

#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}

pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC
	pub next_outbound_htlc_minimum_msat: u64,
}

#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}

/// Represents the resolution status of an inbound HTLC.
#[derive(Clone)]
enum InboundHTLCResolution {
	/// Resolved implies the action we must take with the inbound HTLC has already been determined,
	/// i.e., we already know whether it must be failed back or forwarded.
	//
	// TODO: Once this variant is removed, we should also clean up
	// [`MonitorRestoreUpdates::accepted_htlcs`] as the path will be unreachable.
	Resolved {
		pending_htlc_status: PendingHTLCStatus,
	},
	/// Pending implies we will attempt to resolve the inbound HTLC once it has been fully committed
	/// to by both sides of the channel, i.e., once a `revoke_and_ack` has been processed by both
	/// nodes for the state update in which it was proposed.
	Pending {
		update_add_htlc: msgs::UpdateAddHTLC,
	},
}

impl_writeable_tlv_based_enum!(InboundHTLCResolution,
	(0, Resolved) => {
		(0, pending_htlc_status, required),
	},
	(2, Pending) => {
		(0, update_add_htlc, required),
	};
);

enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(InboundHTLCResolution),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which is provided one at a time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack               <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(InboundHTLCResolution),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(InboundHTLCResolution),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}

/// Exposes the state of pending inbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum InboundHTLCStateDetails {
	/// We have added this HTLC in our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// before this HTLC is included on the remote commitment transaction.
	AwaitingRemoteRevokeToAdd,
	/// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides
	/// and is included in both commitment transactions.
	///
	/// This HTLC is now safe to either forward or be claimed as a payment by us. The HTLC will
	/// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this
	/// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart
	/// payment, it will only be claimed together with other required parts.
	Committed,
	/// We have received the preimage for this HTLC and it is being removed by fulfilling it with
	/// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting
	/// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote
	/// commitment transaction after update_fulfill_htlc.
	AwaitingRemoteRevokeToRemoveFulfill,
	/// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc.
	/// This HTLC is still on both commitment transactions, but we are awaiting the appropriate
	/// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment
	/// transaction.
	AwaitingRemoteRevokeToRemoveFail,
}

impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
	fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> {
		match state {
			InboundHTLCState::RemoteAnnounced(_) => None,
			InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::Committed =>
				Some(InboundHTLCStateDetails::Committed),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill),
		}
	}
}

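// A minimal, hedged illustration (not part of upstream) of the mapping above:
// transient `RemoteAnnounced` HTLCs expose no details yet, while `LocalRemoved`
// HTLCs are distinguished by their removal reason.
#[cfg(test)]
mod inbound_htlc_state_details_example {
	use super::*;

	#[test]
	fn maps_states_to_details() {
		assert_eq!(Option::<InboundHTLCStateDetails>::from(&InboundHTLCState::Committed),
			Some(InboundHTLCStateDetails::Committed));
		let fulfilled = InboundHTLCState::LocalRemoved(
			InboundHTLCRemovalReason::Fulfill(PaymentPreimage([42; 32])));
		assert_eq!(Option::<InboundHTLCStateDetails>::from(&fulfilled),
			Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill));
	}
}
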
impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveFulfill) => {},
	(6, AwaitingRemoteRevokeToRemoveFail) => {};
);

struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}

/// Exposes details around pending inbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct InboundHTLCDetails {
	/// The HTLC ID.
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	pub htlc_id: u64,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`InboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<InboundHTLCStateDetails>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcasted as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
}

impl_writeable_tlv_based!(InboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, is_dust, required),
});

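// A hedged sketch (not part of upstream) showing that `InboundHTLCDetails`
// round-trips through the TLV stream defined just above; `encode` is the
// provided helper on the `Writeable` trait.
#[cfg(test)]
mod inbound_htlc_details_ser_example {
	use super::*;

	#[test]
	fn tlv_round_trip() {
		let details = InboundHTLCDetails {
			htlc_id: 0,
			amount_msat: 10_000,
			cltv_expiry: 500_000,
			payment_hash: PaymentHash([7; 32]),
			state: Some(InboundHTLCStateDetails::Committed),
			is_dust: false,
		};
		let encoded = details.encode();
		let decoded: InboundHTLCDetails = Readable::read(&mut &encoded[..]).unwrap();
		assert_eq!(decoded, details);
	}
}
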
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}

/// Exposes the state of pending outbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum OutboundHTLCStateDetails {
	/// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added
	/// on the remote's commitment transaction after update_add_htlc.
	AwaitingRemoteRevokeToAdd,
	/// The HTLC has been added to the remote's commitment transaction by sending commitment_signed
	/// and receiving revoke_and_ack in return.
	///
	/// The HTLC will remain in this state until the remote node resolves the HTLC, or until we
	/// unilaterally close the channel due to a timeout with an uncooperative remote node.
	Committed,
	/// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveSuccess,
	/// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveFailure,
}

impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
	fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails {
		match state {
			OutboundHTLCState::LocalAnnounced(_) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd,
			OutboundHTLCState::Committed =>
				OutboundHTLCStateDetails::Committed,
			// RemoteRemoved states are ignored as the state is transient and the remote has not committed to
			// the state yet.
			OutboundHTLCState::RemoteRemoved(_) =>
				OutboundHTLCStateDetails::Committed,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
		}
	}
}

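// A minimal, hedged companion example (not part of upstream) for the outbound
// mapping above: transient `RemoteRemoved` still reads as `Committed`, and
// removals are split by their `OutboundHTLCOutcome`.
#[cfg(test)]
mod outbound_htlc_state_details_example {
	use super::*;

	#[test]
	fn maps_states_to_details() {
		assert_eq!(OutboundHTLCStateDetails::from(&OutboundHTLCState::Committed),
			OutboundHTLCStateDetails::Committed);
		let removed = OutboundHTLCState::AwaitingRemoteRevokeToRemove(
			OutboundHTLCOutcome::Success(Some(PaymentPreimage([0; 32]))));
		assert_eq!(OutboundHTLCStateDetails::from(&removed),
			OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess);
	}
}
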
impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveSuccess) => {},
	(6, AwaitingRemoteRevokeToRemoveFailure) => {};
);

#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}

impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}

#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}

/// Exposes details around pending outbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct OutboundHTLCDetails {
	/// The HTLC ID.
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	///
	/// Not present when we are awaiting a remote revocation and the HTLC is not added yet.
	pub htlc_id: Option<u64>,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`OutboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<OutboundHTLCStateDetails>,
	/// The extra fee being skimmed off the top of this HTLC.
	pub skimmed_fee_msat: Option<u64>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcasted as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
}

impl_writeable_tlv_based!(OutboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, skimmed_fee_msat, required),
	(10, is_dust, required),
});

/// See the `ChannelReadyFlags::AWAITING_REMOTE_REVOKE` state flag for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}

macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			#[allow(unused)]
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			#[allow(unused)]
			fn new() -> Self { Self(0) }

			#[allow(unused)]
			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			#[allow(unused)]
			fn is_empty(&self) -> bool { self.0 == 0 }
			#[allow(unused)]
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
			#[allow(unused)]
			fn set(&mut self, flag: Self) { *self |= flag }
			#[allow(unused)]
			fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
		}

		$(
			define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);
		)*

		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
		impl $flag_type {
			#[allow(unused)]
			fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
			#[allow(unused)]
			fn $set(&mut self) { self.set($flag_type::new() | $flag) }
			#[allow(unused)]
			fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
		}
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);

		define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
			is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
		define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
			is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
		define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
			is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
		define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
			is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);

		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}

/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}

define_state_flags!(
	"Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
	FundedStateFlags, [
		("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
			until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
			is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
		("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
			somewhere and we should pause sending any outbound messages until they've managed to \
			complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
			is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
		("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
			any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
			message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
			is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
		("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
			the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
			is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
	]
);

define_state_flags!(
	"Flags that only apply to [`ChannelState::NegotiatingFunding`].",
	NegotiatingFundingFlags, [
		("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
			OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
		("Indicates we have received their `open_channel`/`accept_channel` message.",
			THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
	]
);

define_state_flags!(
	"Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
	FUNDED_STATE, AwaitingChannelReadyFlags, [
		("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
			is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
		("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
			is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
		("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
			is being held until all channels in the batch have received `funding_signed` and have \
			their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
			is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
	]
);

define_state_flags!(
	"Flags that only apply to [`ChannelState::ChannelReady`].",
	FUNDED_STATE, ChannelReadyFlags, [
		("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
			`revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
			messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
			implicit ACK, so instead we have to hold them away temporarily to be sent later.",
			AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
			is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
	]
);

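// A hedged sketch (not part of upstream) of the accessors the
// `define_state_flags!` invocations above generate, including how per-state
// flags compose with the shared `FundedStateFlags` via `BitOr`.
#[cfg(test)]
mod state_flags_example {
	use super::*;

	#[test]
	fn generated_accessors_and_composition() {
		let mut flags = ChannelReadyFlags::new();
		assert!(flags.is_empty());
		flags.set_awaiting_remote_revoke();
		assert!(flags.is_awaiting_remote_revoke());
		flags.clear_awaiting_remote_revoke();
		assert!(!flags.is_awaiting_remote_revoke());
		// `FUNDED_STATE` invocations also get getters for the shared flags.
		let combined = ChannelReadyFlags::AWAITING_REMOTE_REVOKE | FundedStateFlags::PEER_DISCONNECTED;
		assert!(combined.is_peer_disconnected() && combined.is_awaiting_remote_revoke());
	}
}
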
// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
// into account when introducing new states and update `test_channel_state_order` accordingly.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}

macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
		#[allow(unused)]
		fn $get(&self) -> bool {
			match self {
				$(
					ChannelState::$state(flags) => flags.$get(),
				)*
				_ => false,
			}
		}
		#[allow(unused)]
		fn $set(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => flags.$set(),
				)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		#[allow(unused)]
		fn $clear(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => { let _ = flags.$clear(); },
				)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state: ident) => {
		impl_state_flag!($get, $set, $clear, [$state]);
	};
}

impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn can_generate_new_commitment(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				!flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
					!flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
					!flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "Can only generate new commitment within ChannelReady");
				false
			},
		}
	}

	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
}

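// A small, hedged illustration (not part of upstream) of the `u32` encoding
// used for `ChannelState`: the variant's bit is OR'd with its flag bits and
// recovered losslessly by `from_u32`.
#[cfg(test)]
mod channel_state_encoding_example {
	use super::*;

	#[test]
	fn u32_round_trip() {
		let state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY);
		let encoded = state.to_u32();
		assert_eq!(encoded, state_flags::AWAITING_CHANNEL_READY | state_flags::THEIR_CHANNEL_READY);
		assert_eq!(ChannelState::from_u32(encoded), Ok(state));
	}
}
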
pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;

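// A hedged arithmetic sketch (not part of upstream): a commitment transaction's
// estimated weight is the channel-type-dependent base weight plus a fixed
// per-non-dust-HTLC increment.
#[cfg(test)]
mod commitment_weight_example {
	use super::*;

	#[test]
	fn weight_scales_with_nondust_htlcs() {
		let non_anchor = ChannelTypeFeatures::only_static_remote_key();
		let num_nondust_htlcs: u64 = 3;
		let weight = commitment_tx_base_weight(&non_anchor)
			+ num_nondust_htlcs * COMMITMENT_TX_WEIGHT_PER_HTLC;
		assert_eq!(weight, 724 + 3 * 172);
	}
}
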
pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 1_0000_0000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;

/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}

pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}

macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}

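// A hedged usage sketch (not part of upstream) for `secp_check!`: on `Err` the
// macro early-returns a `ChannelError::Close` from the enclosing function. The
// helper function name here is illustrative only.
#[cfg(test)]
mod secp_check_example {
	use super::*;

	fn parse_counterparty_pubkey(bytes: &[u8]) -> Result<PublicKey, ChannelError> {
		Ok(secp_check!(PublicKey::from_slice(bytes), "Peer provided an invalid pubkey".to_owned()))
	}

	#[test]
	fn invalid_input_maps_to_channel_close() {
		// An all-zero slice is not a valid secp256k1 point, so the macro bails out.
		assert!(matches!(parse_counterparty_pubkey(&[0; 33]), Err(ChannelError::Close(_))));
	}
}
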
958 /// The "channel disabled" bit in channel_update must be set based on whether we are connected to
959 /// our counterparty or not. However, we don't want to announce updates right away to avoid
960 /// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
961 /// our channel_update message and track the current state here.
962 /// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
963 #[derive(Clone, Copy, PartialEq)]
964 pub(super) enum ChannelUpdateStatus {
965         /// We've announced the channel as enabled and are connected to our peer.
966         Enabled,
967         /// Our channel is no longer live, but we haven't announced the channel as disabled yet.
968         DisabledStaged(u8),
969         /// Our channel is live again, but we haven't announced the channel as enabled yet.
970         EnabledStaged(u8),
971         /// We've announced the channel as disabled.
972         Disabled,
973 }
974
975 /// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
976 #[derive(PartialEq)]
977 pub enum AnnouncementSigsState {
978         /// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
979         /// we sent the last `AnnouncementSignatures`.
980         NotSent,
981         /// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
982         /// This state never appears on disk - instead we write `NotSent`.
983         MessageSent,
984         /// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
985         /// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
986         /// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
987         /// they send back a `RevokeAndACK`.
988         /// This state never appears on disk - instead we write `NotSent`.
989         Committed,
990         /// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
991         /// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
992         PeerReceived,
993 }
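// State-flow sketch, derived from the variant docs above (both transient states are persisted
// as `NotSent`):
//
//     NotSent --we send announcement_signatures--> MessageSent
//     MessageSent --we send commitment_signed--> Committed
//     Committed --they send revoke_and_ack--> PeerReceived
//     MessageSent | Committed --peer disconnects--> NotSent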
994
995 /// An enum indicating whether the local or remote side offered a given HTLC.
996 enum HTLCInitiator {
997         LocalOffered,
998         RemoteOffered,
999 }
1000
1001 /// Current counts of various HTLCs, useful for exactly calculating the current available balances.
1002 struct HTLCStats {
1003         pending_inbound_htlcs: usize,
1004         pending_outbound_htlcs: usize,
1005         pending_inbound_htlcs_value_msat: u64,
1006         pending_outbound_htlcs_value_msat: u64,
1007         on_counterparty_tx_dust_exposure_msat: u64,
1008         on_holder_tx_dust_exposure_msat: u64,
1009         outbound_holding_cell_msat: u64,
1010         on_holder_tx_outbound_holding_cell_htlcs_count: u32, // dust HTLCs *not* included
1011 }
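// Sketch of how the dust-exposure fields above get filled (illustrative; `dust_threshold_sat`,
// `stats`, and `second_stage_tx_weight` are local stand-ins - the real weight is
// `htlc_success_tx_weight` or `htlc_timeout_tx_weight` depending on the HTLC's direction). An
// HTLC is "dust" on a given commitment transaction if its value cannot pay for its second-stage
// claim transaction on top of the broadcaster's dust limit, and a dust HTLC's full value counts
// as exposure:
//
//     // Anchor channels have zero-fee second-stage transactions, so they skip the fee term.
//     let dust_threshold_sat = dust_limit_satoshis
//         + second_stage_tx_weight * u64::from(feerate_per_kw) / 1000;
//     if htlc.amount_msat / 1000 < dust_threshold_sat {
//         stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
//     }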
1012
1013 /// A struct gathering stats on a commitment transaction, either local or remote.
1014 struct CommitmentStats<'a> {
1015         tx: CommitmentTransaction, // the transaction info
1016         feerate_per_kw: u32, // the feerate included to build the transaction
1017         total_fee_sat: u64, // the total fee included in the transaction
1018         num_nondust_htlcs: usize, // the number of HTLC outputs (dust HTLCs *not* included)
1019         htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (dust HTLCs *included*) which were not ignored when building the transaction
1020         local_balance_msat: u64, // local balance before fees *not* considering dust limits
1021         remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
1022         outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
1023         inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
1024 }
1025
1026 /// Used when calculating whether we or the remote can afford an additional HTLC.
1027 struct HTLCCandidate {
1028         amount_msat: u64,
1029         origin: HTLCInitiator,
1030 }
1031
1032 impl HTLCCandidate {
1033         fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
1034                 Self {
1035                         amount_msat,
1036                         origin,
1037                 }
1038         }
1039 }
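// Usage sketch: a candidate is handed to the commitment-fee predictors defined later in this
// file to ask "what would the commitment fee be with one more HTLC of this size?":
//
//     let candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered);
//     let fee_msat = context.next_local_commit_tx_fee_msat(candidate, None);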
1040
1041 /// A return value enum for get_update_fulfill_htlc. See UpdateFulfillCommitFetch variants for
1042 /// a description.
1043 enum UpdateFulfillFetch {
1044         NewClaim {
1045                 monitor_update: ChannelMonitorUpdate,
1046                 htlc_value_msat: u64,
1047                 msg: Option<msgs::UpdateFulfillHTLC>,
1048         },
1049         DuplicateClaim {},
1050 }
1051
1052 /// The return type of get_update_fulfill_htlc_and_commit.
1053 pub enum UpdateFulfillCommitFetch {
1054         /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
1055         /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
1056         /// previously placed in the holding cell (and has since been removed).
1057         NewClaim {
1058                 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
1059                 monitor_update: ChannelMonitorUpdate,
1060                 /// The value of the HTLC which was claimed, in msat.
1061                 htlc_value_msat: u64,
1062         },
1063         /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
1064         /// or has been forgotten (presumably previously claimed).
1065         DuplicateClaim {},
1066 }
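// A hedged sketch of consuming this return type (`apply_monitor_update` and
// `record_claimed_value` are illustrative caller-side helpers):
//
//     match chan.get_update_fulfill_htlc_and_commit(htlc_id, payment_preimage, &logger) {
//         UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat } => {
//             // Persist the preimage via the monitor update before doing anything else.
//             apply_monitor_update(monitor_update);
//             record_claimed_value(htlc_value_msat);
//         },
//         UpdateFulfillCommitFetch::DuplicateClaim {} => {
//             // Already claimed (or sitting in the holding cell); nothing further to do.
//         },
//     }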
1067
1068 /// The return value of `monitor_updating_restored`
1069 pub(super) struct MonitorRestoreUpdates {
1070         pub raa: Option<msgs::RevokeAndACK>,
1071         pub commitment_update: Option<msgs::CommitmentUpdate>,
1072         pub order: RAACommitmentOrder,
1073         pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
1074         pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1075         pub finalized_claimed_htlcs: Vec<HTLCSource>,
1076         pub pending_update_adds: Vec<msgs::UpdateAddHTLC>,
1077         pub funding_broadcastable: Option<Transaction>,
1078         pub channel_ready: Option<msgs::ChannelReady>,
1079         pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
1080 }
1081
1082 /// The return value of `signer_maybe_unblocked`
1083 #[allow(unused)]
1084 pub(super) struct SignerResumeUpdates {
1085         pub commitment_update: Option<msgs::CommitmentUpdate>,
1086         pub funding_signed: Option<msgs::FundingSigned>,
1087         pub channel_ready: Option<msgs::ChannelReady>,
1088 }
1089
1090 /// The return value of `channel_reestablish`
1091 pub(super) struct ReestablishResponses {
1092         pub channel_ready: Option<msgs::ChannelReady>,
1093         pub raa: Option<msgs::RevokeAndACK>,
1094         pub commitment_update: Option<msgs::CommitmentUpdate>,
1095         pub order: RAACommitmentOrder,
1096         pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
1097         pub shutdown_msg: Option<msgs::Shutdown>,
1098 }
1099
1100 /// The result of a shutdown that should be handled.
1101 #[must_use]
1102 pub(crate) struct ShutdownResult {
1103         pub(crate) closure_reason: ClosureReason,
1104         /// A channel monitor update to apply.
1105         pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
1106         /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
1107         pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
1108         /// An unbroadcasted batch funding transaction id. The closure of this channel should be
1109         /// propagated to the remainder of the batch.
1110         pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
1111         pub(crate) channel_id: ChannelId,
1112         pub(crate) user_channel_id: u128,
1113         pub(crate) channel_capacity_satoshis: u64,
1114         pub(crate) counterparty_node_id: PublicKey,
1115         pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
1116         pub(crate) channel_funding_txo: Option<OutPoint>,
1117 }
1118
1119 /// If the majority of the channel's funds are to the fundee and the initiator holds only just
1120 /// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the
1121 /// initiator controls the feerate, if they then go to increase the channel fee, they may have no
1122 /// balance left, while the fundee is unable to send a payment as the increase in fee more than
1123 /// drains their reserve value. Thus, neither side can send a new HTLC and the channel becomes
1124 /// useless. To avoid this, before sending an HTLC when we are the initiator, we check that the
1125 /// feerate can increase by this multiple without hitting this case.
1126 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
1127 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
1128 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
1129 /// leave the channel less usable as we hold a bigger reserve.
1130 #[cfg(any(fuzzing, test))]
1131 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
1132 #[cfg(not(any(fuzzing, test)))]
1133 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
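// Worked example (illustrative numbers): at a current feerate of 1_000 sat/kW, a non-anchor
// commitment transaction of ~724 weight units costs 724 sats in fees. With a multiple of 2, we
// require the initiator to also be able to afford that commitment at 2_000 sat/kW, i.e.
// 1_448 sats, before we will add a new HTLC.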
1134
1135 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
1136 /// channel creation on an inbound channel, we simply force-close and move on.
1137 /// This constant is the one suggested in BOLT 2.
1138 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
1139
1140 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
1141 /// not have enough balance remaining to cover the onchain cost of this new
1142 /// HTLC weight. If this happens, our counterparty rejects our
1143 /// commitment_signed including this new HTLC, as it infringes on the channel
1144 /// reserve.
1145 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
1146 /// size 2. However, if the number of concurrent update_add_htlcs is higher, this still
1147 /// leads to a channel force-close. Ultimately, this is an issue coming from the
1148 /// design of LN state machines, allowing asynchronous updates.
1149 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
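// Worked example (illustrative numbers, non-anchor weights): each non-dust HTLC output adds
// roughly 172 weight units to the commitment transaction, so budgeting for this buffer of 2
// extra HTLCs at 1_000 sat/kW reserves an additional 2 * 172 * 1_000 / 1_000 = 344 sats of fee
// when computing an outbound update_fee.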
1150
1151 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
1152 /// commitment transaction fees, with at least this many HTLCs present on the commitment
1153 /// transaction (not counting the value of the HTLCs themselves).
1154 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
1155
1156 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
1157 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
1158 /// ChannelUpdate prompted by the config update. This value was determined as follows:
1159 ///
1160 ///   * The expected interval between ticks (1 minute).
1161 ///   * The average convergence delay of updates across the network, i.e., ~300 seconds for a
1162 ///     node to see an update, as measured in `<https://arxiv.org/pdf/2205.12737.pdf>`.
1163 ///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval = 300s / 60s = 5
1164 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
1165
1166 /// The number of ticks that may elapse while we're waiting for a response to a
1167 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
1168 /// them.
1169 ///
1170 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
1171 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
1172
1173 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
1174 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
1175 /// exceeding this age limit will be force-closed and purged from memory.
1176 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
1177
1178 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
1179 pub(crate) const COINBASE_MATURITY: u32 = 100;
1180
1181 struct PendingChannelMonitorUpdate {
1182         update: ChannelMonitorUpdate,
1183 }
1184
1185 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
1186         (0, update, required),
1187 });
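// Conceptual sketch of what the macro above provides (not its literal expansion): it derives
// `Writeable`/`Readable` for `PendingChannelMonitorUpdate`, encoding each listed field as a TLV
// record with the given (even, hence required) type number - here, `update` under type 0:
//
//     let bytes = pending_update.encode(); // `Writeable::encode` serializes to a Vec<u8>
//     let read_back: PendingChannelMonitorUpdate = Readable::read(&mut &bytes[..])?;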
1188
1189 /// The `ChannelPhase` enum describes the current phase in the life of a lightning channel, with
1190 /// each of its variants containing an appropriate channel struct.
1191 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
1192         UnfundedOutboundV1(OutboundV1Channel<SP>),
1193         UnfundedInboundV1(InboundV1Channel<SP>),
1194         #[cfg(any(dual_funding, splicing))]
1195         UnfundedOutboundV2(OutboundV2Channel<SP>),
1196         #[cfg(any(dual_funding, splicing))]
1197         UnfundedInboundV2(InboundV2Channel<SP>),
1198         Funded(Channel<SP>),
1199 }
1200
1201 impl<'a, SP: Deref> ChannelPhase<SP> where
1202         SP::Target: SignerProvider,
1203         <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
1204 {
1205         pub fn context(&'a self) -> &'a ChannelContext<SP> {
1206                 match self {
1207                         ChannelPhase::Funded(chan) => &chan.context,
1208                         ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
1209                         ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
1210                         #[cfg(any(dual_funding, splicing))]
1211                         ChannelPhase::UnfundedOutboundV2(chan) => &chan.context,
1212                         #[cfg(any(dual_funding, splicing))]
1213                         ChannelPhase::UnfundedInboundV2(chan) => &chan.context,
1214                 }
1215         }
1216
1217         pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
1218                 match self {
1219                         ChannelPhase::Funded(ref mut chan) => &mut chan.context,
1220                         ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
1221                         ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
1222                         #[cfg(any(dual_funding, splicing))]
1223                         ChannelPhase::UnfundedOutboundV2(ref mut chan) => &mut chan.context,
1224                         #[cfg(any(dual_funding, splicing))]
1225                         ChannelPhase::UnfundedInboundV2(ref mut chan) => &mut chan.context,
1226                 }
1227         }
1228 }
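// Usage sketch: `context()`/`context_mut()` let phase-agnostic code reach the shared state
// without matching on every variant at each call site, e.g. (illustrative free function):
//
//     fn peer_of<SP: Deref>(phase: &ChannelPhase<SP>) -> PublicKey
//     where
//         SP::Target: SignerProvider,
//     {
//         phase.context().get_counterparty_node_id()
//     }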
1229
1230 /// Contains all state common to unfunded inbound/outbound channels.
1231 pub(super) struct UnfundedChannelContext {
1232         /// A counter tracking how many ticks have elapsed since this unfunded channel was
1233         /// created. If this unfunded channel reaches an age of `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`,
1234         /// it will be force-closed and purged from memory.
1235         ///
1236         /// This is so that we don't keep channels around that haven't progressed to a funded state
1237         /// in a timely manner.
1238         unfunded_channel_age_ticks: usize,
1239 }
1240
1241 impl UnfundedChannelContext {
1242         /// Determines whether we should force-close and purge this unfunded channel from memory due to it
1243         /// having reached the unfunded channel age limit.
1244         ///
1245         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
1246         pub fn should_expire_unfunded_channel(&mut self) -> bool {
1247                 self.unfunded_channel_age_ticks += 1;
1248                 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
1249         }
1250 }
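// Caller-side sketch (the real driver is `ChannelManager::timer_tick_occurred`; the container
// here is illustrative): each timer tick ages every unfunded channel and drops the expired ones:
//
//     unfunded_channels.retain_mut(|unfunded_context: &mut UnfundedChannelContext| {
//         !unfunded_context.should_expire_unfunded_channel()
//     });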
1251
1252 /// Contains everything about the channel, including its state and various flags.
1253 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
1254         config: LegacyChannelConfig,
1255
1256         // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
1257         // constructed using it. The second element in the tuple corresponds to the number of ticks that
1258         // have elapsed since the update occurred.
1259         prev_config: Option<(ChannelConfig, usize)>,
1260
1261         inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
1262
1263         user_id: u128,
1264
1265         /// The current channel ID.
1266         channel_id: ChannelId,
1267         /// The temporary channel ID used during channel setup. The value is kept even after transitioning to a final channel ID.
1268         /// Will be `None` for channels created prior to 0.0.115.
1269         temporary_channel_id: Option<ChannelId>,
1270         channel_state: ChannelState,
1271
1272         // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSigs message to
1273         // our peer. However, we want to make sure they received it, or else rebroadcast it when we
1274         // next connect.
1275         // We do so here, see `AnnouncementSigsState` for more details on the state(s).
1276         // Note that a number of our tests were written prior to the behavior here which retransmits
1277         // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
1278         // many tests.
1279         #[cfg(any(test, feature = "_test_utils"))]
1280         pub(crate) announcement_sigs_state: AnnouncementSigsState,
1281         #[cfg(not(any(test, feature = "_test_utils")))]
1282         announcement_sigs_state: AnnouncementSigsState,
1283
1284         secp_ctx: Secp256k1<secp256k1::All>,
1285         channel_value_satoshis: u64,
1286
1287         latest_monitor_update_id: u64,
1288
1289         holder_signer: ChannelSignerType<SP>,
1290         shutdown_scriptpubkey: Option<ShutdownScript>,
1291         destination_script: ScriptBuf,
1292
1293         // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
1294         // generation start at 0 and count up... this simplifies some parts of the implementation at the
1295         // cost of others, but should really just be changed.
1296
1297         cur_holder_commitment_transaction_number: u64,
1298         cur_counterparty_commitment_transaction_number: u64,
1299         value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
1300         pending_inbound_htlcs: Vec<InboundHTLCOutput>,
1301         pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
1302         holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1303
1304         /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1305         /// need to ensure we resend them in the order we originally generated them. Note that because
1306         /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1307         /// sufficient to simply set this to the opposite of any message we are generating as we
1308         /// generate it. I.e., when we generate a CS, we set this to RAAFirst as, if there is a pending
1309         /// in-flight RAA to resend, it will have been the first thing we generated, and thus we should
1310         /// send it first.
1311         resend_order: RAACommitmentOrder,
1312
1313         monitor_pending_channel_ready: bool,
1314         monitor_pending_revoke_and_ack: bool,
1315         monitor_pending_commitment_signed: bool,
1316
1317         // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1318         // responsible for some of the HTLCs here or not - we don't know whether the update in question
1319         // completed or not. We currently ignore these fields entirely when force-closing a channel,
1320         // but need to handle this somehow or we run the risk of losing HTLCs!
1321         monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1322         monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1323         monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1324         monitor_pending_update_adds: Vec<msgs::UpdateAddHTLC>,
1325
1326         /// If we went to send a commitment update (i.e., some messages then [`msgs::CommitmentSigned`])
1327         /// but our signer (initially) refused to give us a signature, we should retry at some point in
1328         /// the future when the signer indicates it may have a signature for us.
1329         ///
1330         /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1331         /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1332         signer_pending_commitment_update: bool,
1333         /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1334         /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1335         /// outbound or inbound.
1336         signer_pending_funding: bool,
1337
1338         // pending_update_fee is filled when sending and receiving update_fee.
1339         //
1340         // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1341         // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1342         // generating new commitment transactions with exactly the same criteria as inbound/outbound
1343         // HTLCs with similar state.
1344         pending_update_fee: Option<(u32, FeeUpdateState)>,
1345         // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1346         // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1347         // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1348         // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1349         // further `send_update_fee` calls, dropping the previous holding cell update entirely.
1350         holding_cell_update_fee: Option<u32>,
1351         next_holder_htlc_id: u64,
1352         next_counterparty_htlc_id: u64,
1353         feerate_per_kw: u32,
1354
1355         /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1356         /// when the channel is updated in ways which may impact the `channel_update` message or when a
1357         /// new block is received, ensuring it's always at least moderately close to the current real
1358         /// time.
1359         update_time_counter: u32,
1360
1361         #[cfg(debug_assertions)]
1362         /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1363         holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1364         #[cfg(debug_assertions)]
1365         /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1366         counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1367
1368         last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1369         target_closing_feerate_sats_per_kw: Option<u32>,
1370
1371         /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1372         /// update, we need to delay processing it until later. We do that here by simply storing the
1373         /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1374         pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1375
1376         /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1377         /// transaction. These are set once we reach `closing_negotiation_ready`.
1378         #[cfg(test)]
1379         pub(crate) closing_fee_limits: Option<(u64, u64)>,
1380         #[cfg(not(test))]
1381         closing_fee_limits: Option<(u64, u64)>,
1382
1383         /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1384         /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1385         /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1386         /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1387         /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1388         ///
1389         /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1390         /// until we see a `commitment_signed` before doing so.
1391         ///
1392         /// We don't bother to persist this - we anticipate this state won't last longer than a few
1393         /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1394         expecting_peer_commitment_signed: bool,
1395
1396         /// The hash of the block in which the funding transaction was included.
1397         funding_tx_confirmed_in: Option<BlockHash>,
1398         funding_tx_confirmation_height: u32,
1399         short_channel_id: Option<u64>,
1400         /// Either the height at which this channel was created or the height at which it was last
1401         /// serialized if it was serialized by versions prior to 0.0.103.
1402         /// We use this to close if funding is never broadcasted.
1403         channel_creation_height: u32,
1404
1405         counterparty_dust_limit_satoshis: u64,
1406
1407         #[cfg(test)]
1408         pub(super) holder_dust_limit_satoshis: u64,
1409         #[cfg(not(test))]
1410         holder_dust_limit_satoshis: u64,
1411
1412         #[cfg(test)]
1413         pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
1414         #[cfg(not(test))]
1415         counterparty_max_htlc_value_in_flight_msat: u64,
1416
1417         #[cfg(test)]
1418         pub(super) holder_max_htlc_value_in_flight_msat: u64,
1419         #[cfg(not(test))]
1420         holder_max_htlc_value_in_flight_msat: u64,
1421
1422         /// The minimum channel reserve for us (self) to maintain - set by them.
1423         counterparty_selected_channel_reserve_satoshis: Option<u64>,
1424
1425         #[cfg(test)]
1426         pub(super) holder_selected_channel_reserve_satoshis: u64,
1427         #[cfg(not(test))]
1428         holder_selected_channel_reserve_satoshis: u64,
1429
1430         counterparty_htlc_minimum_msat: u64,
1431         holder_htlc_minimum_msat: u64,
1432         #[cfg(test)]
1433         pub counterparty_max_accepted_htlcs: u16,
1434         #[cfg(not(test))]
1435         counterparty_max_accepted_htlcs: u16,
1436         holder_max_accepted_htlcs: u16,
1437         minimum_depth: Option<u32>,
1438
1439         counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1440
1441         pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1442         funding_transaction: Option<Transaction>,
1443         is_batch_funding: Option<()>,
1444
1445         counterparty_cur_commitment_point: Option<PublicKey>,
1446         counterparty_prev_commitment_point: Option<PublicKey>,
1447         counterparty_node_id: PublicKey,
1448
1449         counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1450
1451         commitment_secrets: CounterpartyCommitmentSecrets,
1452
1453         channel_update_status: ChannelUpdateStatus,
1454         /// Once we reach `closing_negotiation_ready`, we set this, indicating that if closing_signed
1455         /// does not complete within a single timer tick (one minute), we should force-close the channel.
1456         /// This prevents us from keeping unusable channels around forever if our counterparty wishes
1457         /// to DoS us.
1458         /// Note that this field is reset to false on deserialization to give us a chance to connect to
1459         /// our peer and start the closing_signed negotiation fresh.
1460         closing_signed_in_flight: bool,
1461
1462         /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1463         /// This can be used to rebroadcast the channel_announcement message later.
1464         announcement_sigs: Option<(Signature, Signature)>,
1465
1466         // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1467         // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
1468         // be, by comparing the cached values to the fee of the transaction generated by
1469         // `build_commitment_transaction`.
1470         #[cfg(any(test, fuzzing))]
1471         next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1472         #[cfg(any(test, fuzzing))]
1473         next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1474
1475         /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1476         /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1477         /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1478         /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1479         /// message until we receive a channel_reestablish.
1480         ///
1481         /// See also <https://github.com/lightningnetwork/lnd/issues/4006>.
1482         pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1483
1484         /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1485         /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1486         /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1487         /// unblock the state machine.
1488         ///
1489         /// This behavior is mostly motivated by an lnd bug in which we don't receive a message we expect
1490         /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1491         /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1492         ///
1493         /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1494         /// [`msgs::RevokeAndACK`] message from the counterparty.
1495         sent_message_awaiting_response: Option<usize>,
1496
1497         #[cfg(any(test, fuzzing))]
1498         // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1499         // corresponding HTLC on the inbound path. If, then, the outbound path channel is
1500         // disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
1501         // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1502         // is fine, but as a sanity check, when we fail to generate the second claim we check here
1503         // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1504         historical_inbound_htlc_fulfills: HashSet<u64>,
1505
1506         /// This channel's type, as negotiated during channel open
1507         channel_type: ChannelTypeFeatures,
1508
1509         // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1510         // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1511         // the channel's funding UTXO.
1512         //
1513         // We also use this when sending our peer a channel_update that isn't to be broadcasted
1514         // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1515         // associated channel mapping.
1516         //
1517         // We only bother storing the most recent SCID alias at any time, though our counterparty has
1518         // to store all of them.
1519         latest_inbound_scid_alias: Option<u64>,
1520
1521         // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1522         // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1523         // don't currently support node id aliases and eventually privacy should be provided with
1524         // blinded paths instead of simple scid+node_id aliases.
1525         outbound_scid_alias: u64,
1526
1527         // We track whether we already emitted a `ChannelPending` event.
1528         channel_pending_event_emitted: bool,
1529
1530         // We track whether we already emitted a `ChannelReady` event.
1531         channel_ready_event_emitted: bool,
1532
1533         /// `Some` if we initiated the shutdown of the channel.
1534         local_initiated_shutdown: Option<()>,
1535
1536         /// The unique identifier used to re-derive the private key material for the channel through
1537         /// [`SignerProvider::derive_channel_signer`].
1538         #[cfg(not(test))]
1539         channel_keys_id: [u8; 32],
1540         #[cfg(test)]
1541         pub channel_keys_id: [u8; 32],
1542
1543         /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1544         /// store it here and only release it to the `ChannelManager` once it asks for it.
1545         blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1546 }
1547
1548 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider {
1549         fn new_for_inbound_channel<'a, ES: Deref, F: Deref, L: Deref>(
1550                 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1551                 entropy_source: &'a ES,
1552                 signer_provider: &'a SP,
1553                 counterparty_node_id: PublicKey,
1554                 their_features: &'a InitFeatures,
1555                 user_id: u128,
1556                 config: &'a UserConfig,
1557                 current_chain_height: u32,
1558                 logger: &'a L,
1559                 is_0conf: bool,
1560                 our_funding_satoshis: u64,
1561                 counterparty_pubkeys: ChannelPublicKeys,
1562                 channel_type: ChannelTypeFeatures,
1563                 holder_selected_channel_reserve_satoshis: u64,
1564                 msg_channel_reserve_satoshis: u64,
1565                 msg_push_msat: u64,
1566                 open_channel_fields: msgs::CommonOpenChannelFields,
1567         ) -> Result<ChannelContext<SP>, ChannelError>
1568                 where
1569                         ES::Target: EntropySource,
1570                         F::Target: FeeEstimator,
1571                         L::Target: Logger,
1572                         SP::Target: SignerProvider,
1573         {
1574                 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id));
1575                 let announced_channel = (open_channel_fields.channel_flags & 1) == 1;
1576
1577                 let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis);
1578
1579                 let channel_keys_id = signer_provider.generate_channel_keys_id(true, channel_value_satoshis, user_id);
1580                 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
1581                 let pubkeys = holder_signer.pubkeys().clone();
1582
1583                 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
1584                         return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be greater than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
1585                 }
1586
1587                 // Check sanity of message fields:
1588                 if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis {
1589                         return Err(ChannelError::Close(format!(
1590                                 "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}",
1591                                 config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis,
1592                                 open_channel_fields.funding_satoshis, our_funding_satoshis)));
1593                 }
1594                 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1595                         return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
1596                 }
1597                 if msg_channel_reserve_satoshis > channel_value_satoshis {
1598                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
1599                 }
1600                 let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000;
1601                 if msg_push_msat > full_channel_value_msat {
1602                         return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
1603                 }
1604                 if open_channel_fields.dust_limit_satoshis > channel_value_satoshis {
1605                         return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
1606                 }
1607                 if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat {
1608                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
1609                 }
1610                 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
1611
1612                 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
1613                 if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay {
1614                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
1615                 }
1616                 if open_channel_fields.max_accepted_htlcs < 1 {
1617                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
1618                 }
1619                 if open_channel_fields.max_accepted_htlcs > MAX_HTLCS {
1620                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
1621                 }
1622
1623                 // Now check against optional parameters as set by config...
1624                 if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis {
1625                         return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
1626                 }
1627                 if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
1628                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
1629                 }
1630                 if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
1631                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
1632                 }
1633                 if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
1634                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
1635                 }
1636                 if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
1637                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
1638                 }
1639                 if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1640                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1641                 }
1642                 if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
1643                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
1644                 }
1645
1646                 // Convert things into internal flags and prep our state:
1647
1648                 if config.channel_handshake_limits.force_announced_channel_preference {
1649                         if config.channel_handshake_config.announced_channel != announced_channel {
1650                                 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
1651                         }
1652                 }
1653
1654                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1655                         // Protocol level safety check in place, although it should never happen because
1656                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
1657                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1658                 }
1659                 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
1660                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
1661                 }
1662                 if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1663                         log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
1664                                 msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
1665                 }
1666                 if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis {
1667                         return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
1668                 }
1669
1670                 // check if the funder's amount for the initial commitment tx is sufficient
1671                 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
1672                 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
1673                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2
1674                 } else {
1675                         0
1676                 };
1677                 let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat;
1678                 let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
1679                 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
1680                         return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay the initial commitment transaction fee of {} sats.", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
1681                 }
1682
1683                 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
1684                 // While it's reasonable for us to not meet the channel reserve initially (if they don't
1685                 // want to push much to us), our counterparty should always have more than our reserve.
1686                 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
1687                         return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
1688                 }
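                // Worked example (illustrative numbers, non-anchor channel): with
                // funding_satoshis = 100_000, push_msat = 0, a 1_000 sat commitment fee and a
                // 1_000 sat holder-selected reserve, to_remote_satoshis = 100_000 - 1_000 - 0
                // = 99_000, which comfortably exceeds the reserve. A funder pushing nearly the
                // entire channel value to us would fail this check instead.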
1689
1690                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
1691                         match &open_channel_fields.shutdown_scriptpubkey {
1692                                 &Some(ref script) => {
1693                                         // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
1694                                         if script.len() == 0 {
1695                                                 None
1696                                         } else {
1697                                                 if !script::is_bolt2_compliant(&script, their_features) {
1698                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
1699                                                 }
1700                                                 Some(script.clone())
1701                                         }
1702                                 },
1703                                 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (a.k.a. a 0-length script). Peer looks buggy, so we fail the channel
1704                                 &None => {
1705                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
1706                                 }
1707                         }
1708                 } else { None };
1709
1710                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1711                         match signer_provider.get_shutdown_scriptpubkey() {
1712                                 Ok(scriptpubkey) => Some(scriptpubkey),
1713                                 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
1714                         }
1715                 } else { None };
1716
1717                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1718                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
1719                                 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
1720                         }
1721                 }
1722
1723                 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1724                         Ok(script) => script,
1725                         Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
1726                 };
1727
1728                 let mut secp_ctx = Secp256k1::new();
1729                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1730
1731                 let minimum_depth = if is_0conf {
1732                         Some(0)
1733                 } else {
1734                         Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
1735                 };
1736
1737                 let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat;
1738
1739                 // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`?
1740
1741                 let channel_context = ChannelContext {
1742                         user_id,
1743
1744                         config: LegacyChannelConfig {
1745                                 options: config.channel_config.clone(),
1746                                 announced_channel,
1747                                 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1748                         },
1749
1750                         prev_config: None,
1751
1752                         inbound_handshake_limits_override: None,
1753
1754                         temporary_channel_id: Some(open_channel_fields.temporary_channel_id),
1755                         channel_id: open_channel_fields.temporary_channel_id,
1756                         channel_state: ChannelState::NegotiatingFunding(
1757                                 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
1758                         ),
1759                         announcement_sigs_state: AnnouncementSigsState::NotSent,
1760                         secp_ctx,
1761
1762                         latest_monitor_update_id: 0,
1763
1764                         holder_signer: ChannelSignerType::Ecdsa(holder_signer),
1765                         shutdown_scriptpubkey,
1766                         destination_script,
1767
1768                         cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1769                         cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1770                         value_to_self_msat,
1771
1772                         pending_inbound_htlcs: Vec::new(),
1773                         pending_outbound_htlcs: Vec::new(),
1774                         holding_cell_htlc_updates: Vec::new(),
1775                         pending_update_fee: None,
1776                         holding_cell_update_fee: None,
1777                         next_holder_htlc_id: 0,
1778                         next_counterparty_htlc_id: 0,
1779                         update_time_counter: 1,
1780
1781                         resend_order: RAACommitmentOrder::CommitmentFirst,
1782
1783                         monitor_pending_channel_ready: false,
1784                         monitor_pending_revoke_and_ack: false,
1785                         monitor_pending_commitment_signed: false,
1786                         monitor_pending_forwards: Vec::new(),
1787                         monitor_pending_failures: Vec::new(),
1788                         monitor_pending_finalized_fulfills: Vec::new(),
1789                         monitor_pending_update_adds: Vec::new(),
1790
1791                         signer_pending_commitment_update: false,
1792                         signer_pending_funding: false,
1793
1794
1795                         #[cfg(debug_assertions)]
1796                         holder_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1797                         #[cfg(debug_assertions)]
1798                         counterparty_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1799
1800                         last_sent_closing_fee: None,
1801                         pending_counterparty_closing_signed: None,
1802                         expecting_peer_commitment_signed: false,
1803                         closing_fee_limits: None,
1804                         target_closing_feerate_sats_per_kw: None,
1805
1806                         funding_tx_confirmed_in: None,
1807                         funding_tx_confirmation_height: 0,
1808                         short_channel_id: None,
1809                         channel_creation_height: current_chain_height,
1810
1811                         feerate_per_kw: open_channel_fields.commitment_feerate_sat_per_1000_weight,
1812                         channel_value_satoshis,
1813                         counterparty_dust_limit_satoshis: open_channel_fields.dust_limit_satoshis,
1814                         holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
1815                         counterparty_max_htlc_value_in_flight_msat: cmp::min(open_channel_fields.max_htlc_value_in_flight_msat, channel_value_satoshis * 1000),
1816                         holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
1817                         counterparty_selected_channel_reserve_satoshis: Some(msg_channel_reserve_satoshis),
1818                         holder_selected_channel_reserve_satoshis,
1819                         counterparty_htlc_minimum_msat: open_channel_fields.htlc_minimum_msat,
1820                         holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
1821                         counterparty_max_accepted_htlcs: open_channel_fields.max_accepted_htlcs,
1822                         holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
1823                         minimum_depth,
1824
1825                         counterparty_forwarding_info: None,
1826
1827                         channel_transaction_parameters: ChannelTransactionParameters {
1828                                 holder_pubkeys: pubkeys,
1829                                 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
1830                                 is_outbound_from_holder: false,
1831                                 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
1832                                         selected_contest_delay: open_channel_fields.to_self_delay,
1833                                         pubkeys: counterparty_pubkeys,
1834                                 }),
1835                                 funding_outpoint: None,
1836                                 channel_type_features: channel_type.clone()
1837                         },
1838                         funding_transaction: None,
1839                         is_batch_funding: None,
1840
1841                         counterparty_cur_commitment_point: Some(open_channel_fields.first_per_commitment_point),
1842                         counterparty_prev_commitment_point: None,
1843                         counterparty_node_id,
1844
1845                         counterparty_shutdown_scriptpubkey,
1846
1847                         commitment_secrets: CounterpartyCommitmentSecrets::new(),
1848
1849                         channel_update_status: ChannelUpdateStatus::Enabled,
1850                         closing_signed_in_flight: false,
1851
1852                         announcement_sigs: None,
1853
1854                         #[cfg(any(test, fuzzing))]
1855                         next_local_commitment_tx_fee_info_cached: Mutex::new(None),
1856                         #[cfg(any(test, fuzzing))]
1857                         next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
1858
1859                         workaround_lnd_bug_4006: None,
1860                         sent_message_awaiting_response: None,
1861
1862                         latest_inbound_scid_alias: None,
1863                         outbound_scid_alias: 0,
1864
1865                         channel_pending_event_emitted: false,
1866                         channel_ready_event_emitted: false,
1867
1868                         #[cfg(any(test, fuzzing))]
1869                         historical_inbound_htlc_fulfills: new_hash_set(),
1870
1871                         channel_type,
1872                         channel_keys_id,
1873
1874                         local_initiated_shutdown: None,
1875
1876                         blocked_monitor_updates: Vec::new(),
1877                 };
1878
1879                 Ok(channel_context)
1880         }
1881
1882         fn new_for_outbound_channel<'a, ES: Deref, F: Deref>(
1883                 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1884                 entropy_source: &'a ES,
1885                 signer_provider: &'a SP,
1886                 counterparty_node_id: PublicKey,
1887                 their_features: &'a InitFeatures,
1888                 funding_satoshis: u64,
1889                 push_msat: u64,
1890                 user_id: u128,
1891                 config: &'a UserConfig,
1892                 current_chain_height: u32,
1893                 outbound_scid_alias: u64,
1894                 temporary_channel_id: Option<ChannelId>,
1895                 holder_selected_channel_reserve_satoshis: u64,
1896                 channel_keys_id: [u8; 32],
1897                 holder_signer: <SP::Target as SignerProvider>::EcdsaSigner,
1898                 pubkeys: ChannelPublicKeys,
1899         ) -> Result<ChannelContext<SP>, APIError>
1900                 where
1901                         ES::Target: EntropySource,
1902                         F::Target: FeeEstimator,
1903                         SP::Target: SignerProvider,
1904         {
1905                 // This will be updated with the counterparty contribution if this is a dual-funded channel
1906                 let channel_value_satoshis = funding_satoshis;
1907
1908                 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
1909
1910                 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
1911                         return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
1912                 }
1913                 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1914                         return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
1915                 }
1916                 let channel_value_msat = channel_value_satoshis * 1000;
1917                 if push_msat > channel_value_msat {
1918                         return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
1919                 }
1920                 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
1921                         return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risks", holder_selected_contest_delay)});
1922                 }
1923
1924                 let channel_type = get_initial_channel_type(&config, their_features);
1925                 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
1926
1927                 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
1928                         (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
1929                 } else {
1930                         (ConfirmationTarget::NonAnchorChannelFee, 0)
1931                 };
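                     // With anchors this reserves both anchor outputs up front. As an illustrative
                     // figure, assuming the usual 330 sat anchor output value, that is
                     // 2 * 330 * 1000 = 660_000 msat set aside before the affordability check below.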
1932                 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
1933
1934                 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
1935                 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
1936                 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
1937                         return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay fee for initial commitment transaction fee of {}.", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
1938                 }
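                     // Illustrative sanity math for the check above, assuming BOLT 3's non-anchor
                     // commitment weights (724 WU base, 172 WU per HTLC): at 2_500 sat/kW, budgeting
                     // for 4 HTLCs costs 2_500 * (724 + 4 * 172) / 1000 = 3_530 sats, which the
                     // funder's balance (net of any anchor value) must be able to cover.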
1939
1940                 let mut secp_ctx = Secp256k1::new();
1941                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1942
1943                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1944                         match signer_provider.get_shutdown_scriptpubkey() {
1945                                 Ok(scriptpubkey) => Some(scriptpubkey),
1946                                 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
1947                         }
1948                 } else { None };
1949
1950                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1951                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
1952                                 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
1953                         }
1954                 }
1955
1956                 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1957                         Ok(script) => script,
1958                         Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
1959                 };
1960
1961                 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
1962
1963                 Ok(Self {
1964                         user_id,
1965
1966                         config: LegacyChannelConfig {
1967                                 options: config.channel_config.clone(),
1968                                 announced_channel: config.channel_handshake_config.announced_channel,
1969                                 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1970                         },
1971
1972                         prev_config: None,
1973
1974                         inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
1975
1976                         channel_id: temporary_channel_id,
1977                         temporary_channel_id: Some(temporary_channel_id),
1978                         channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
1979                         announcement_sigs_state: AnnouncementSigsState::NotSent,
1980                         secp_ctx,
1981                         // We'll add our counterparty's `funding_satoshis` when we receive `accept_channel2`.
1982                         channel_value_satoshis,
1983
1984                         latest_monitor_update_id: 0,
1985
1986                         holder_signer: ChannelSignerType::Ecdsa(holder_signer),
1987                         shutdown_scriptpubkey,
1988                         destination_script,
1989
1990                         cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1991                         cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1992                         value_to_self_msat,
1993
1994                         pending_inbound_htlcs: Vec::new(),
1995                         pending_outbound_htlcs: Vec::new(),
1996                         holding_cell_htlc_updates: Vec::new(),
1997                         pending_update_fee: None,
1998                         holding_cell_update_fee: None,
1999                         next_holder_htlc_id: 0,
2000                         next_counterparty_htlc_id: 0,
2001                         update_time_counter: 1,
2002
2003                         resend_order: RAACommitmentOrder::CommitmentFirst,
2004
2005                         monitor_pending_channel_ready: false,
2006                         monitor_pending_revoke_and_ack: false,
2007                         monitor_pending_commitment_signed: false,
2008                         monitor_pending_forwards: Vec::new(),
2009                         monitor_pending_failures: Vec::new(),
2010                         monitor_pending_finalized_fulfills: Vec::new(),
2011                         monitor_pending_update_adds: Vec::new(),
2012
2013                         signer_pending_commitment_update: false,
2014                         signer_pending_funding: false,
2015
2016                         // We'll add our counterparty's `funding_satoshis` to these max commitment output assertions
2017                         // when we receive `accept_channel2`.
2018                         #[cfg(debug_assertions)]
2019                         holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
2020                         #[cfg(debug_assertions)]
2021                         counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
2022
2023                         last_sent_closing_fee: None,
2024                         pending_counterparty_closing_signed: None,
2025                         expecting_peer_commitment_signed: false,
2026                         closing_fee_limits: None,
2027                         target_closing_feerate_sats_per_kw: None,
2028
2029                         funding_tx_confirmed_in: None,
2030                         funding_tx_confirmation_height: 0,
2031                         short_channel_id: None,
2032                         channel_creation_height: current_chain_height,
2033
2034                         feerate_per_kw: commitment_feerate,
2035                         counterparty_dust_limit_satoshis: 0,
2036                         holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
2037                         counterparty_max_htlc_value_in_flight_msat: 0,
2038                         // We'll adjust this to include our counterparty's `funding_satoshis` when we
2039                         // receive `accept_channel2`.
2040                         holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
2041                         counterparty_selected_channel_reserve_satoshis: None, // Filled in in accept_channel
2042                         holder_selected_channel_reserve_satoshis,
2043                         counterparty_htlc_minimum_msat: 0,
2044                         holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
2045                         counterparty_max_accepted_htlcs: 0,
2046                         holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
2047                         minimum_depth: None, // Filled in in accept_channel
2048
2049                         counterparty_forwarding_info: None,
2050
2051                         channel_transaction_parameters: ChannelTransactionParameters {
2052                                 holder_pubkeys: pubkeys,
2053                                 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
2054                                 is_outbound_from_holder: true,
2055                                 counterparty_parameters: None,
2056                                 funding_outpoint: None,
2057                                 channel_type_features: channel_type.clone()
2058                         },
2059                         funding_transaction: None,
2060                         is_batch_funding: None,
2061
2062                         counterparty_cur_commitment_point: None,
2063                         counterparty_prev_commitment_point: None,
2064                         counterparty_node_id,
2065
2066                         counterparty_shutdown_scriptpubkey: None,
2067
2068                         commitment_secrets: CounterpartyCommitmentSecrets::new(),
2069
2070                         channel_update_status: ChannelUpdateStatus::Enabled,
2071                         closing_signed_in_flight: false,
2072
2073                         announcement_sigs: None,
2074
2075                         #[cfg(any(test, fuzzing))]
2076                         next_local_commitment_tx_fee_info_cached: Mutex::new(None),
2077                         #[cfg(any(test, fuzzing))]
2078                         next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
2079
2080                         workaround_lnd_bug_4006: None,
2081                         sent_message_awaiting_response: None,
2082
2083                         latest_inbound_scid_alias: None,
2084                         outbound_scid_alias,
2085
2086                         channel_pending_event_emitted: false,
2087                         channel_ready_event_emitted: false,
2088
2089                         #[cfg(any(test, fuzzing))]
2090                         historical_inbound_htlc_fulfills: new_hash_set(),
2091
2092                         channel_type,
2093                         channel_keys_id,
2094
2095                         blocked_monitor_updates: Vec::new(),
2096                         local_initiated_shutdown: None,
2097                 })
2098         }
2099
2100         /// Allowed in any state (including after shutdown)
2101         pub fn get_update_time_counter(&self) -> u32 {
2102                 self.update_time_counter
2103         }
2104
2105         pub fn get_latest_monitor_update_id(&self) -> u64 {
2106                 self.latest_monitor_update_id
2107         }
2108
2109         pub fn should_announce(&self) -> bool {
2110                 self.config.announced_channel
2111         }
2112
2113         pub fn is_outbound(&self) -> bool {
2114                 self.channel_transaction_parameters.is_outbound_from_holder
2115         }
2116
2117         /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
2118         /// Allowed in any state (including after shutdown)
2119         pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
2120                 self.config.options.forwarding_fee_base_msat
2121         }
2122
2123         /// Returns true if we've ever received a message from the remote end for this Channel
2124         pub fn have_received_message(&self) -> bool {
2125                 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
2126         }
2127
2128         /// Returns true if this channel is fully established and not known to be closing.
2129         /// Allowed in any state (including after shutdown)
2130         pub fn is_usable(&self) -> bool {
2131                 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
2132                         !self.channel_state.is_local_shutdown_sent() &&
2133                         !self.channel_state.is_remote_shutdown_sent() &&
2134                         !self.monitor_pending_channel_ready
2135         }
2136
2137         /// Returns the state of the channel as it progresses through the various stages of shutdown.
2138         pub fn shutdown_state(&self) -> ChannelShutdownState {
2139                 match self.channel_state {
2140                         ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
2141                                 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
2142                                         ChannelShutdownState::ShutdownInitiated
2143                                 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
2144                                         ChannelShutdownState::ResolvingHTLCs
2145                                 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
2146                                         ChannelShutdownState::NegotiatingClosingFee
2147                                 } else {
2148                                         ChannelShutdownState::NotShuttingDown
2149                                 },
2150                         ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
2151                         _ => ChannelShutdownState::NotShuttingDown,
2152                 }
2153         }
2154
2155         fn closing_negotiation_ready(&self) -> bool {
2156                 let is_ready_to_close = match self.channel_state {
2157                         ChannelState::AwaitingChannelReady(flags) =>
2158                                 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
2159                         ChannelState::ChannelReady(flags) =>
2160                                 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
2161                         _ => false,
2162                 };
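                     // In other words: both sides have exchanged `shutdown` and nothing in flight can
                     // still change the final balances (no pending HTLCs in either direction and no
                     // pending fee update), so `closing_signed` negotiation may begin.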
2163                 self.pending_inbound_htlcs.is_empty() &&
2164                         self.pending_outbound_htlcs.is_empty() &&
2165                         self.pending_update_fee.is_none() &&
2166                         is_ready_to_close
2167         }
2168
2169         /// Returns true if this channel is currently available for use. This is a superset of
2170         /// is_usable() and considers things like the channel being temporarily disabled.
2171         /// Allowed in any state (including after shutdown)
2172         pub fn is_live(&self) -> bool {
2173                 self.is_usable() && !self.channel_state.is_peer_disconnected()
2174         }
2175
2176         // Public utilities:
2177
2178         pub fn channel_id(&self) -> ChannelId {
2179                 self.channel_id
2180         }
2181
2182         /// Returns the `temporary_channel_id` used during channel establishment.
2183         ///
2184         /// Will return `None` for channels created prior to LDK version 0.0.115.
2185         pub fn temporary_channel_id(&self) -> Option<ChannelId> {
2186                 self.temporary_channel_id
2187         }
2188
2189         pub fn minimum_depth(&self) -> Option<u32> {
2190                 self.minimum_depth
2191         }
2192
2193         /// Gets the "user_id" value passed into the construction of this channel. It has no special
2194         /// meaning and exists only to allow users to have a persistent identifier of a channel.
2195         pub fn get_user_id(&self) -> u128 {
2196                 self.user_id
2197         }
2198
2199         /// Gets the channel's type
2200         pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
2201                 &self.channel_type
2202         }
2203
2204         /// Gets the channel's `short_channel_id`.
2205         ///
2206         /// Will return `None` if the channel hasn't been confirmed yet.
2207         pub fn get_short_channel_id(&self) -> Option<u64> {
2208                 self.short_channel_id
2209         }
2210
2211         /// Allowed in any state (including after shutdown)
2212         pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
2213                 self.latest_inbound_scid_alias
2214         }
2215
2216         /// Allowed in any state (including after shutdown)
2217         pub fn outbound_scid_alias(&self) -> u64 {
2218                 self.outbound_scid_alias
2219         }
2220
2221         /// Returns the holder signer for this channel.
2222         #[cfg(test)]
2223         pub fn get_signer(&self) -> &ChannelSignerType<SP> {
2224                 return &self.holder_signer
2225         }
2226
2227         /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0,
2228         /// indicating we were written by LDK prior to 0.0.106 (which did not set outbound SCID
2229         /// aliases), or prior to any channel actions during `Channel` initialization.
2230         pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
2231                 debug_assert_eq!(self.outbound_scid_alias, 0);
2232                 self.outbound_scid_alias = outbound_scid_alias;
2233         }
2234
2235         /// Returns the funding_txo we either got from our peer, or were given by
2236         /// get_funding_created.
2237         pub fn get_funding_txo(&self) -> Option<OutPoint> {
2238                 self.channel_transaction_parameters.funding_outpoint
2239         }
2240
2241         /// Returns the height in which our funding transaction was confirmed.
2242         pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
2243                 let conf_height = self.funding_tx_confirmation_height;
2244                 if conf_height > 0 {
2245                         Some(conf_height)
2246                 } else {
2247                         None
2248                 }
2249         }
2250
2251         /// Returns the block hash in which our funding transaction was confirmed.
2252         pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
2253                 self.funding_tx_confirmed_in
2254         }
2255
2256         /// Returns the current number of confirmations on the funding transaction.
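             ///
             /// For example, if the funding transaction confirmed at height 100, this returns 1 at
             /// height 100 and 6 at height 105; if a reorg unconfirms it, this returns 0 again.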
2257         pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
2258                 if self.funding_tx_confirmation_height == 0 {
2259                         // We either haven't seen any confirmation yet, or observed a reorg.
2260                         return 0;
2261                 }
2262
2263                 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
2264         }
2265
2266         fn get_holder_selected_contest_delay(&self) -> u16 {
2267                 self.channel_transaction_parameters.holder_selected_contest_delay
2268         }
2269
2270         fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
2271                 &self.channel_transaction_parameters.holder_pubkeys
2272         }
2273
2274         pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
2275                 self.channel_transaction_parameters.counterparty_parameters
2276                         .as_ref().map(|params| params.selected_contest_delay)
2277         }
2278
2279         fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
2280                 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
2281         }
2282
2283         /// Allowed in any state (including after shutdown)
2284         pub fn get_counterparty_node_id(&self) -> PublicKey {
2285                 self.counterparty_node_id
2286         }
2287
2288         /// Allowed in any state (including after shutdown)
2289         pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
2290                 self.holder_htlc_minimum_msat
2291         }
2292
2293         /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
2294         pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
2295                 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
2296         }
2297
2298         /// Allowed in any state (including after shutdown)
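             ///
             /// For example (illustrative numbers), a 1_000_000 sat channel whose counterparty allows
             /// 990_000_000 msat in flight announces min(900_000_000, 990_000_000) = 900_000_000 msat.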
2299         pub fn get_announced_htlc_max_msat(&self) -> u64 {
2300                 return cmp::min(
2301                         // Upper bound by capacity. We make it a bit less than full capacity to prevent attempts
2302                         // to use full capacity. This is an effort to reduce routing failures, because in many cases
2303                         // a channel might otherwise be used to route very small values (either by honest users or as DoS).
2304                         self.channel_value_satoshis * 1000 * 9 / 10,
2305
2306                         self.counterparty_max_htlc_value_in_flight_msat
2307                 );
2308         }
2309
2310         /// Allowed in any state (including after shutdown)
2311         pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
2312                 self.counterparty_htlc_minimum_msat
2313         }
2314
2315         /// Allowed in any state (including after shutdown), but will return `None` before TheirInitSent
2316         pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
2317                 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
2318         }
2319
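             // Sketch of the math below (illustrative numbers): with a 1_000_000 sat channel,
             // 10_000 sat reserves on each side, and a party limit of 500_000_000 msat in flight,
             // this returns min((1_000_000 - 10_000 - 10_000) * 1000, 500_000_000) = 500_000_000 msat.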
2320         fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
2321                 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
2322                         let holder_reserve = self.holder_selected_channel_reserve_satoshis;
2323                         cmp::min(
2324                                 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
2325                                 party_max_htlc_value_in_flight_msat
2326                         )
2327                 })
2328         }
2329
2330         pub fn get_value_satoshis(&self) -> u64 {
2331                 self.channel_value_satoshis
2332         }
2333
2334         pub fn get_fee_proportional_millionths(&self) -> u32 {
2335                 self.config.options.forwarding_fee_proportional_millionths
2336         }
2337
2338         pub fn get_cltv_expiry_delta(&self) -> u16 {
2339                 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
2340         }
2341
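             // Worked example (illustrative config values): with
             // `MaxDustHTLCExposure::FeeRateMultiplier(10_000)` and an `OnChainSweep` estimate of
             // 2_500 sat/kW, the cap below is 2_500 * 10_000 = 25_000_000 msat of dust exposure.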
2342         pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
2343                 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
2344         where F::Target: FeeEstimator
2345         {
2346                 match self.config.options.max_dust_htlc_exposure {
2347                         MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
2348                                 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
2349                                         ConfirmationTarget::OnChainSweep) as u64;
2350                                 feerate_per_kw.saturating_mul(multiplier)
2351                         },
2352                         MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
2353                 }
2354         }
2355
2356         /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
2357         pub fn prev_config(&self) -> Option<ChannelConfig> {
2358                 self.prev_config.map(|prev_config| prev_config.0)
2359         }
2360
2361         // Checks whether we should emit a `ChannelPending` event.
2362         pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
2363                 self.is_funding_broadcast() && !self.channel_pending_event_emitted
2364         }
2365
2366         // Returns whether we already emitted a `ChannelPending` event.
2367         pub(crate) fn channel_pending_event_emitted(&self) -> bool {
2368                 self.channel_pending_event_emitted
2369         }
2370
2371         // Remembers that we already emitted a `ChannelPending` event.
2372         pub(crate) fn set_channel_pending_event_emitted(&mut self) {
2373                 self.channel_pending_event_emitted = true;
2374         }
2375
2376         // Checks whether we should emit a `ChannelReady` event.
2377         pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
2378                 self.is_usable() && !self.channel_ready_event_emitted
2379         }
2380
2381         // Remembers that we already emitted a `ChannelReady` event.
2382         pub(crate) fn set_channel_ready_event_emitted(&mut self) {
2383                 self.channel_ready_event_emitted = true;
2384         }
2385
2386         /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
2387         /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
2388         /// no longer be considered when forwarding HTLCs.
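             ///
             /// For example, `update_config` stores the outgoing options as `(prev_config, 0)`; each
             /// tick then increments the counter until it reaches [`EXPIRE_PREV_CONFIG_TICKS`].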
2389         pub fn maybe_expire_prev_config(&mut self) {
2390                 if self.prev_config.is_none() {
2391                         return;
2392                 }
2393                 let prev_config = self.prev_config.as_mut().unwrap();
2394                 prev_config.1 += 1;
2395                 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
2396                         self.prev_config = None;
2397                 }
2398         }
2399
2400         /// Returns the current [`ChannelConfig`] applied to the channel.
2401         pub fn config(&self) -> ChannelConfig {
2402                 self.config.options
2403         }
2404
2405         /// Updates the channel's config. A bool is returned indicating whether the applied config
2406         /// update requires broadcasting a new `ChannelUpdate` message.
2407         pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
2408                 let did_channel_update =
2409                         self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
2410                         self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
2411                         self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
2412                 if did_channel_update {
2413                         self.prev_config = Some((self.config.options, 0));
2414                         // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
2415                         // policy change to propagate throughout the network.
2416                         self.update_time_counter += 1;
2417                 }
2418                 self.config.options = *config;
2419                 did_channel_update
2420         }
2421
2422         /// Returns true if funding_signed was sent/received and the
2423         /// funding transaction has been broadcast if necessary.
2424         pub fn is_funding_broadcast(&self) -> bool {
2425                 !self.channel_state.is_pre_funded_state() &&
2426                         !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
2427         }
2428
2429         /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
2430         /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
2431         /// the transaction. Thus, b will generally be sending a signature over such a transaction to
2432         /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
2433         /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
2434         /// an HTLC to a).
2435         /// @local is used only to convert relevant internal structures which refer to remote vs local
2436         /// to decide the value of outputs and the direction of HTLCs.
2437         /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
2438         /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
2439         /// have not yet committed it. Such HTLCs will only be included in transactions which are being
2440         /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
2441         /// which peer generated this transaction and "to whom" this transaction flows.
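             /// For example, with @local set and @generated_by_local unset we build our own ("a's")
             /// transaction as proposed by our counterparty: HTLCs they have just announced are
             /// included, while HTLCs we have announced but they have not yet committed to are not.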
2442         #[inline]
2443         fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
2444                 where L::Target: Logger
2445         {
2446                 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
2447                 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
2448                 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
2449
2450                 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
2451                 let mut remote_htlc_total_msat = 0;
2452                 let mut local_htlc_total_msat = 0;
2453                 let mut value_to_self_msat_offset = 0;
2454
2455                 let mut feerate_per_kw = self.feerate_per_kw;
2456                 if let Some((feerate, update_state)) = self.pending_update_fee {
2457                         if match update_state {
2458                                 // Note that these match the inclusion criteria when scanning
2459                                 // pending_inbound_htlcs below.
2460                                 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
2461                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
2462                                 FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local },
2463                         } {
2464                                 feerate_per_kw = feerate;
2465                         }
2466                 }
2467
2468                 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
2469                         commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
2470                         get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
2471                         &self.channel_id,
2472                         if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
2473
2474                 macro_rules! get_htlc_in_commitment {
2475                         ($htlc: expr, $offered: expr) => {
2476                                 HTLCOutputInCommitment {
2477                                         offered: $offered,
2478                                         amount_msat: $htlc.amount_msat,
2479                                         cltv_expiry: $htlc.cltv_expiry,
2480                                         payment_hash: $htlc.payment_hash,
2481                                         transaction_output_index: None
2482                                 }
2483                         }
2484                 }
2485
2486                 macro_rules! add_htlc_output {
2487                         ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
2488                                 if $outbound == local { // "offered HTLC output"
2489                                         let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
2490                                         let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2491                                                 0
2492                                         } else {
2493                                                 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
2494                                         };
2495                                         if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2496                                                 log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2497                                                 included_non_dust_htlcs.push((htlc_in_tx, $source));
2498                                         } else {
2499                                                 log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2500                                                 included_dust_htlcs.push((htlc_in_tx, $source));
2501                                         }
2502                                 } else {
2503                                         let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
2504                                         let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2505                                                 0
2506                                         } else {
2507                                                 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
2508                                         };
2509                                         if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2510                                                 log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2511                                                 included_non_dust_htlcs.push((htlc_in_tx, $source));
2512                                         } else {
2513                                                 log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2514                                                 included_dust_htlcs.push((htlc_in_tx, $source));
2515                                         }
2516                                 }
2517                         }
2518                 }
2519
2520                 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2521
2522                 for ref htlc in self.pending_inbound_htlcs.iter() {
2523                         let (include, state_name) = match htlc.state {
2524                                 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
2525                                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
2526                                 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
2527                                 InboundHTLCState::Committed => (true, "Committed"),
2528                                 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
2529                         };
2530
2531                         if include {
2532                                 add_htlc_output!(htlc, false, None, state_name);
2533                                 remote_htlc_total_msat += htlc.amount_msat;
2534                         } else {
2535                                 log_trace!(logger, "   ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
2536                                 match &htlc.state {
2537                                         &InboundHTLCState::LocalRemoved(ref reason) => {
2538                                                 if generated_by_local {
2539                                                         if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
2540                                                                 inbound_htlc_preimages.push(preimage);
2541                                                                 value_to_self_msat_offset += htlc.amount_msat as i64;
2542                                                         }
2543                                                 }
2544                                         },
2545                                         _ => {},
2546                                 }
2547                         }
2548                 }
2549
2550
2551                 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2552
2553                 for ref htlc in self.pending_outbound_htlcs.iter() {
2554                         let (include, state_name) = match htlc.state {
2555                                 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
2556                                 OutboundHTLCState::Committed => (true, "Committed"),
2557                                 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
2558                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
2559                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
2560                         };
2561
2562                         let preimage_opt = match htlc.state {
2563                                 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
2564                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
2565                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
2566                                 _ => None,
2567                         };
2568
2569                         if let Some(preimage) = preimage_opt {
2570                                 outbound_htlc_preimages.push(preimage);
2571                         }
2572
2573                         if include {
2574                                 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
2575                                 local_htlc_total_msat += htlc.amount_msat;
2576                         } else {
2577                                 log_trace!(logger, "   ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
2578                                 match htlc.state {
2579                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
2580                                                 value_to_self_msat_offset -= htlc.amount_msat as i64;
2581                                         },
2582                                         OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
2583                                                 if !generated_by_local {
2584                                                         value_to_self_msat_offset -= htlc.amount_msat as i64;
2585                                                 }
2586                                         },
2587                                         _ => {},
2588                                 }
2589                         }
2590                 }
2591
2592                 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
2593                 assert!(value_to_self_msat >= 0);
2594                 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (i.e.
2595                 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
2596                 // "violate" their reserve value by counting those against it. Thus, we have to convert
2597                 // everything to i64 before subtracting as otherwise we can overflow.
2598                 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
2599                 assert!(value_to_remote_msat >= 0);
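                     // Worked example (illustrative numbers): on a 1_000_000 sat channel where we hold
                     // 600_000_000 msat and are fulfilling a 5_000_000 msat inbound HTLC (an offset of
                     // +5_000_000), our side comes to 605_000_000 msat and theirs to 395_000_000 msat.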
2600
2601                 #[cfg(debug_assertions)]
2602                 {
2603                         // Make sure that the to_self/to_remote is always either past the appropriate
2604                         // channel_reserve *or* it is making progress towards it.
2605                         let mut broadcaster_max_commitment_tx_output = if generated_by_local {
2606                                 self.holder_max_commitment_tx_output.lock().unwrap()
2607                         } else {
2608                                 self.counterparty_max_commitment_tx_output.lock().unwrap()
2609                         };
2610                         debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
2611                         broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
2612                         debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
2613                         broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
2614                 }
2615
2616                 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
2617                 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
2618                 let (value_to_self, value_to_remote) = if self.is_outbound() {
2619                         (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
2620                 } else {
2621                         (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
2622                 };
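                     // I.e. the funder pays the commitment fee and, on anchor channels, both anchor
                     // outputs (330 sats each) out of their own balance; the non-funder's balance
                     // passes through untouched.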
2623
2624                 let mut value_to_a = if local { value_to_self } else { value_to_remote };
2625                 let mut value_to_b = if local { value_to_remote } else { value_to_self };
2626                 let (funding_pubkey_a, funding_pubkey_b) = if local {
2627                         (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
2628                 } else {
2629                         (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
2630                 };
2631
2632                 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
2633                         log_trace!(logger, "   ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
2634                 } else {
2635                         value_to_a = 0;
2636                 }
2637
2638                 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
2639                         log_trace!(logger, "   ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
2640                 } else {
2641                         value_to_b = 0;
2642                 }
2643
2644                 let num_nondust_htlcs = included_non_dust_htlcs.len();
2645
2646                 let channel_parameters =
2647                         if local { self.channel_transaction_parameters.as_holder_broadcastable() }
2648                         else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
2649                 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
2650                                                                              value_to_a as u64,
2651                                                                              value_to_b as u64,
2652                                                                              funding_pubkey_a,
2653                                                                              funding_pubkey_b,
2654                                                                              keys.clone(),
2655                                                                              feerate_per_kw,
2656                                                                              &mut included_non_dust_htlcs,
2657                                                                              &channel_parameters
2658                 );
2659                 let mut htlcs_included = included_non_dust_htlcs;
2660                 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
2661                 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
2662                 htlcs_included.append(&mut included_dust_htlcs);
2663
2664                 CommitmentStats {
2665                         tx,
2666                         feerate_per_kw,
2667                         total_fee_sat,
2668                         num_nondust_htlcs,
2669                         htlcs_included,
2670                         local_balance_msat: value_to_self_msat as u64,
2671                         remote_balance_msat: value_to_remote_msat as u64,
2672                         inbound_htlc_preimages,
2673                         outbound_htlc_preimages,
2674                 }
2675         }
2676
2677         #[inline]
2678         /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
2679         /// counterparty will sign (i.e. DO NOT send signatures over a transaction created by this to
2680         /// our counterparty!)
2681         /// The result is a transaction which we can revoke broadcastership of (i.e. a "local" transaction)
2682         /// TODO Some magic rust shit to compile-time check this?
2683         fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
2684                 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
2685                 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
2686                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2687                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2688
2689                 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
2690         }
2691
2692         #[inline]
2693         /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
2694         /// will sign and send to our counterparty.
2695         /// If an Err is returned, it is a ChannelError::Close (for get_funding_created)
2696         fn build_remote_transaction_keys(&self) -> TxCreationKeys {
2697                 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
2698                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2699                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2700
2701                 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
2702         }
2703
2704         /// Gets the redeemscript for the funding transaction output (i.e. the funding transaction output
2705         /// pays to get_funding_redeemscript().to_v0_p2wsh()).
2706         /// Panics if called before accept_channel/InboundV1Channel::new
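             ///
             /// The script is the standard BOLT 3 2-of-2 multisig, `2 <pubkey1> <pubkey2> 2
             /// OP_CHECKMULTISIG`; see [`make_funding_redeemscript`] for how the two keys are ordered.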
2707         pub fn get_funding_redeemscript(&self) -> ScriptBuf {
2708                 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
2709         }
2710
2711         fn counterparty_funding_pubkey(&self) -> &PublicKey {
2712                 &self.get_counterparty_pubkeys().funding_pubkey
2713         }
2714
2715         pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
2716                 self.feerate_per_kw
2717         }
2718
2719         pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
2720                 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
2721                 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
2722                 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
2723                 // more dust balance if the feerate increases when we have several HTLCs pending
2724                 // which are near the dust limit.
2725                 let mut feerate_per_kw = self.feerate_per_kw;
2726                 // If there's a pending update fee, use it to ensure we aren't under-estimating
2727                 // potential feerate updates coming soon.
2728                 if let Some((feerate, _)) = self.pending_update_fee {
2729                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2730                 }
2731                 if let Some(feerate) = outbound_feerate_update {
2732                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2733                 }
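                     // Worked example: at 10_000 sat/kW with no pending updates, this yields
                     // max(10_000 + 2_530, 10_000 * 1250 / 1000) = max(12_530, 12_500) = 12_530 sat/kW.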
2734                 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
2735                 cmp::max(feerate_per_kw + 2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
2736         }
2737
2738         /// Get forwarding information for the counterparty.
2739         pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
2740                 self.counterparty_forwarding_info.clone()
2741         }
2742
2743         /// Returns an `HTLCStats` describing this channel's pending HTLCs.
2744         fn get_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2745                 let context = self;
2746                 let uses_0_htlc_fee_anchors = self.get_channel_type().supports_anchors_zero_fee_htlc_tx();
2747
2748                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if uses_0_htlc_fee_anchors {
2749                         (0, 0)
2750                 } else {
2751                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2752                         (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2753                                 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2754                 };
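                     // Illustrative numbers, assuming the non-anchor BOLT 3 HTLC claim weights of 663
                     // WU (timeout) and 703 WU (success): at a 3_000 sat/kW buffer feerate, the added
                     // dust margin is 3_000 * 663 / 1000 = 1_989 sats and 3_000 * 703 / 1000 = 2_109 sats.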
2755
2756                 let mut on_holder_tx_dust_exposure_msat = 0;
2757                 let mut on_counterparty_tx_dust_exposure_msat = 0;
2758
2759                 let mut pending_inbound_htlcs_value_msat = 0;
2760                 {
2761                         let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2762                         let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2763                         for ref htlc in context.pending_inbound_htlcs.iter() {
2764                                 pending_inbound_htlcs_value_msat += htlc.amount_msat;
2765                                 if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
2766                                         on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2767                                 }
2768                                 if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
2769                                         on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2770                                 }
2771                         }
2772                 }
2773
2774                 let mut pending_outbound_htlcs_value_msat = 0;
2775                 let mut outbound_holding_cell_msat = 0;
2776                 let mut on_holder_tx_outbound_holding_cell_htlcs_count = 0;
2777                 let mut pending_outbound_htlcs = self.pending_outbound_htlcs.len();
2778                 {
2779                         let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2780                         let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2781                         for ref htlc in context.pending_outbound_htlcs.iter() {
2782                                 pending_outbound_htlcs_value_msat += htlc.amount_msat;
2783                                 if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
2784                                         on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2785                                 }
2786                                 if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
2787                                         on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2788                                 }
2789                         }
2790
2791                         for update in context.holding_cell_htlc_updates.iter() {
2792                                 if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
2793                                         pending_outbound_htlcs += 1;
2794                                         pending_outbound_htlcs_value_msat += amount_msat;
2795                                         outbound_holding_cell_msat += amount_msat;
2796                                         if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
2797                                                 on_counterparty_tx_dust_exposure_msat += amount_msat;
2798                                         }
2799                                         if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
2800                                                 on_holder_tx_dust_exposure_msat += amount_msat;
2801                                         } else {
2802                                                 on_holder_tx_outbound_holding_cell_htlcs_count += 1;
2803                                         }
2804                                 }
2805                         }
2806                 }
2807
2808                 HTLCStats {
2809                         pending_inbound_htlcs: self.pending_inbound_htlcs.len(),
2810                         pending_outbound_htlcs,
2811                         pending_inbound_htlcs_value_msat,
2812                         pending_outbound_htlcs_value_msat,
2813                         on_counterparty_tx_dust_exposure_msat,
2814                         on_holder_tx_dust_exposure_msat,
2815                         outbound_holding_cell_msat,
2816                         on_holder_tx_outbound_holding_cell_htlcs_count,
2817                 }
2818         }
2819
2820         /// Returns information on all pending inbound HTLCs.
2821         pub fn get_pending_inbound_htlc_details(&self) -> Vec<InboundHTLCDetails> {
2822                 let mut holding_cell_states = new_hash_map();
2823                 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2824                         match holding_cell_update {
2825                                 HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2826                                         holding_cell_states.insert(
2827                                                 htlc_id,
2828                                                 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill,
2829                                         );
2830                                 },
2831                                 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2832                                         holding_cell_states.insert(
2833                                                 htlc_id,
2834                                                 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2835                                         );
2836                                 },
2837                                 HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => {
2838                                         holding_cell_states.insert(
2839                                                 htlc_id,
2840                                                 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2841                                         );
2842                                 },
2843                                 // Outbound HTLC.
2844                                 HTLCUpdateAwaitingACK::AddHTLC { .. } => {},
2845                         }
2846                 }
2847                 let mut inbound_details = Vec::new();
2848                 let htlc_success_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2849                         0
2850                 } else {
2851                         let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2852                         dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
2853                 };
2854                 let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
2855                 for htlc in self.pending_inbound_htlcs.iter() {
2856                         if let Some(state_details) = (&htlc.state).into() {
2857                                 inbound_details.push(InboundHTLCDetails{
2858                                         htlc_id: htlc.htlc_id,
2859                                         amount_msat: htlc.amount_msat,
2860                                         cltv_expiry: htlc.cltv_expiry,
2861                                         payment_hash: htlc.payment_hash,
2862                                         state: Some(holding_cell_states.remove(&htlc.htlc_id).unwrap_or(state_details)),
2863                                         is_dust: htlc.amount_msat / 1000 < holder_dust_limit_success_sat,
2864                                 });
2865                         }
2866                 }
2867                 inbound_details
2868         }
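        // For instance (a hypothetical channel state): an inbound HTLC whose ClaimHTLC update is
        // still sitting in the holding cell above is reported as
        // AwaitingRemoteRevokeToRemoveFulfill via the holding_cell_states override, rather than
        // whatever state would be derived from htlc.state alone.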
2869
2870         /// Returns information on all pending outbound HTLCs.
2871         pub fn get_pending_outbound_htlc_details(&self) -> Vec<OutboundHTLCDetails> {
2872                 let mut outbound_details = Vec::new();
2873                 let htlc_timeout_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2874                         0
2875                 } else {
2876                         let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2877                         dust_buffer_feerate * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
2878                 };
2879                 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
2880                 for htlc in self.pending_outbound_htlcs.iter() {
2881                         outbound_details.push(OutboundHTLCDetails{
2882                                 htlc_id: Some(htlc.htlc_id),
2883                                 amount_msat: htlc.amount_msat,
2884                                 cltv_expiry: htlc.cltv_expiry,
2885                                 payment_hash: htlc.payment_hash,
2886                                 skimmed_fee_msat: htlc.skimmed_fee_msat,
2887                                 state: Some((&htlc.state).into()),
2888                                 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat,
2889                         });
2890                 }
2891                 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2892                         if let HTLCUpdateAwaitingACK::AddHTLC {
2893                                 amount_msat,
2894                                 cltv_expiry,
2895                                 payment_hash,
2896                                 skimmed_fee_msat,
2897                                 ..
2898                         } = *holding_cell_update {
2899                                 outbound_details.push(OutboundHTLCDetails{
2900                                         htlc_id: None,
2901                                         amount_msat: amount_msat,
2902                                         cltv_expiry: cltv_expiry,
2903                                         payment_hash: payment_hash,
2904                                         skimmed_fee_msat: skimmed_fee_msat,
2905                                         state: Some(OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
2906                                         is_dust: amount_msat / 1000 < holder_dust_limit_timeout_sat,
2907                                 });
2908                         }
2909                 }
2910                 outbound_details
2911         }
2912
2913         /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
2914         /// Doesn't bother handling the
2915         /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
2916         /// corner case properly.
2917         pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
2918         -> AvailableBalances
2919         where F::Target: FeeEstimator
2920         {
2921                 let context = &self;
2922                 // Note that we have to handle overflow due to the above case.
2923                 let htlc_stats = context.get_pending_htlc_stats(None);
2924
2925                 let mut balance_msat = context.value_to_self_msat;
2926                 for ref htlc in context.pending_inbound_htlcs.iter() {
2927                         if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
2928                                 balance_msat += htlc.amount_msat;
2929                         }
2930                 }
2931                 balance_msat -= htlc_stats.pending_outbound_htlcs_value_msat;
2932
2933                 let outbound_capacity_msat = context.value_to_self_msat
2934                                 .saturating_sub(htlc_stats.pending_outbound_htlcs_value_msat)
2935                                 .saturating_sub(
2936                                         context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
2937
2938                 let mut available_capacity_msat = outbound_capacity_msat;
2939
2940                 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2941                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2942                 } else {
2943                         0
2944                 };
2945                 if context.is_outbound() {
2946                         // We should mind channel commit tx fee when computing how much of the available capacity
2947                         // can be used in the next htlc. Mirrors the logic in send_htlc.
2948                         //
2949                         // The fee depends on whether the amount we will be sending is above dust or not,
2950                         // and the answer will in turn change the amount itself, making it a circular
2951                         // dependency.
2952                         // This complicates the computation around dust-values, up to the one-htlc-value.
2953                         let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2954                         if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2955                                 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2956                         }
2957
2958                         let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2959                         let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2960                         let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2961                         let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2962                         if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2963                                 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2964                                 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2965                         }
2966
2967                         // We will first subtract the fee as if we were above-dust. Then, if the resulting
2968                         // value ends up being below dust, we have this fee available again. In that case,
2969                         // match the value to right-below-dust.
2970                         let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2971                                 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2972                         if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2973                                 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2974                                 debug_assert!(one_htlc_difference_msat != 0);
2975                                 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2976                                 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2977                                 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2978                         } else {
2979                                 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2980                         }
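                        // Worked example (values assumed): with available_capacity_msat = 10_000_000,
                        // max_reserved_commit_tx_fee_msat = 9_000_000, min_reserved_commit_tx_fee_msat =
                        // 8_000_000, no anchors, and real_dust_limit_timeout_sat = 2_000:
                        // capacity_minus_commitment_fee_msat = 10_000_000 - 9_000_000 = 1_000_000, which
                        // is below the 2_000_000 msat dust threshold, so we add back the one-HTLC fee
                        // difference (1_000_000) and clamp to right-below-dust, yielding a next-HTLC
                        // limit of 1_999_999 msat rather than 0.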
2981                 } else {
2982                         // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2983                         // sending a new HTLC won't reduce their balance below our reserve threshold.
2984                         let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2985                         if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2986                                 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2987                         }
2988
2989                         let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2990                         let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2991
2992                         let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2993                         let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2994                                 .saturating_sub(htlc_stats.pending_inbound_htlcs_value_msat);
2995
2996                         if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2997                                 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2998                                 // we've selected for them, we can only send dust HTLCs.
2999                                 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
3000                         }
3001                 }
3002
3003                 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
3004
3005                 // If we get close to our maximum dust exposure, we end up in a situation where we can send
3006                 // between zero and the remaining dust exposure limit, OR above the dust limit.
3007                 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
3008                 // send above the dust limit (as the router can always overpay to meet the dust limit).
3009                 let mut remaining_msat_below_dust_exposure_limit = None;
3010                 let mut dust_exposure_dust_limit_msat = 0;
3011                 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
3012
3013                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3014                         (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
3015                 } else {
3016                         let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
3017                         (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3018                          context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3019                 };
3020                 if htlc_stats.on_counterparty_tx_dust_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
3021                         remaining_msat_below_dust_exposure_limit =
3022                                 Some(max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_counterparty_tx_dust_exposure_msat));
3023                         dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
3024                 }
3025
3026                 if htlc_stats.on_holder_tx_dust_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
3027                         remaining_msat_below_dust_exposure_limit = Some(cmp::min(
3028                                 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
3029                                 max_dust_htlc_exposure_msat.saturating_sub(htlc_stats.on_holder_tx_dust_exposure_msat)));
3030                         dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
3031                 }
3032
3033                 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
3034                         if available_capacity_msat < dust_exposure_dust_limit_msat {
3035                                 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
3036                         } else {
3037                                 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
3038                         }
3039                 }
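                // Worked example (values assumed): with max_dust_htlc_exposure_msat = 5_000_000,
                // on_counterparty_tx_dust_exposure_msat = 4_600_000 and htlc_success_dust_limit =
                // 2_223 sat, the check 4_600_000 + 2_223_000 - 1 > 5_000_000 trips, leaving
                // remaining_msat_below_dust_exposure_limit = Some(400_000) and
                // dust_exposure_dust_limit_msat = 2_223_000. If available_capacity_msat is at
                // least 2_223_000 we raise the minimum so the next HTLC must be non-dust;
                // otherwise we cap the limit at the remaining 400_000 msat of dust headroom.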
3040
3041                 available_capacity_msat = cmp::min(available_capacity_msat,
3042                         context.counterparty_max_htlc_value_in_flight_msat - htlc_stats.pending_outbound_htlcs_value_msat);
3043
3044                 if htlc_stats.pending_outbound_htlcs + 1 > context.counterparty_max_accepted_htlcs as usize {
3045                         available_capacity_msat = 0;
3046                 }
3047
3048                 AvailableBalances {
3049                         inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
3050                                         - context.value_to_self_msat as i64
3051                                         - htlc_stats.pending_inbound_htlcs_value_msat as i64
3052                                         - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
3053                                 0) as u64,
3054                         outbound_capacity_msat,
3055                         next_outbound_htlc_limit_msat: available_capacity_msat,
3056                         next_outbound_htlc_minimum_msat,
3057                         balance_msat,
3058                 }
3059         }
3060
3061         pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
3062                 let context = &self;
3063                 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
3064         }
3065
3066         /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
3067         /// number of pending HTLCs that are on track to be in our next commitment tx.
3068         ///
3069         /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3070         /// `fee_spike_buffer_htlc` is `Some`.
3071         ///
3072         /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3073         /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3074         ///
3075         /// Dust HTLCs are excluded.
3076         fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3077                 let context = &self;
3078                 assert!(context.is_outbound());
3079
3080                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3081                         (0, 0)
3082                 } else {
3083                         (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3084                                 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3085                 };
3086                 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
3087                 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
3088
3089                 let mut addl_htlcs = 0;
3090                 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3091                 match htlc.origin {
3092                         HTLCInitiator::LocalOffered => {
3093                                 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3094                                         addl_htlcs += 1;
3095                                 }
3096                         },
3097                         HTLCInitiator::RemoteOffered => {
3098                                 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
3099                                         addl_htlcs += 1;
3100                                 }
3101                         }
3102                 }
3103
3104                 let mut included_htlcs = 0;
3105                 for ref htlc in context.pending_inbound_htlcs.iter() {
3106                         if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
3107                                 continue
3108                         }
3109                         // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
3110                         // transaction including this HTLC if it times out before they RAA.
3111                         included_htlcs += 1;
3112                 }
3113
3114                 for ref htlc in context.pending_outbound_htlcs.iter() {
3115                         if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
3116                                 continue
3117                         }
3118                         match htlc.state {
3119                                 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
3120                                 OutboundHTLCState::Committed => included_htlcs += 1,
3121                                 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3122                                 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
3123                                 // transaction won't be generated until they send us their next RAA, which will mean
3124                                 // dropping any HTLCs in this state.
3125                                 _ => {},
3126                         }
3127                 }
3128
3129                 for htlc in context.holding_cell_htlc_updates.iter() {
3130                         match htlc {
3131                                 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
3132                                         if amount_msat / 1000 < real_dust_limit_timeout_sat {
3133                                                 continue
3134                                         }
3135                                         included_htlcs += 1
3136                                 },
3137                                 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
3138                                          // ack we're guaranteed to never include them in commitment txs anymore.
3139                         }
3140                 }
3141
3142                 let num_htlcs = included_htlcs + addl_htlcs;
3143                 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
3144                 #[cfg(any(test, fuzzing))]
3145                 {
3146                         let mut fee = res;
3147                         if fee_spike_buffer_htlc.is_some() {
3148                                 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3149                         }
3150                         let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
3151                                 + context.holding_cell_htlc_updates.len();
3152                         let commitment_tx_info = CommitmentTxInfoCached {
3153                                 fee,
3154                                 total_pending_htlcs,
3155                                 next_holder_htlc_id: match htlc.origin {
3156                                         HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3157                                         HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3158                                 },
3159                                 next_counterparty_htlc_id: match htlc.origin {
3160                                         HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3161                                         HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3162                                 },
3163                                 feerate: context.feerate_per_kw,
3164                         };
3165                         *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
3166                 }
3167                 res
3168         }
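        // Rough numeric check of the above (constants assumed: 724 WU commitment base weight and
        // 172 WU per non-dust HTLC on non-anchor channels): at feerate_per_kw = 2500, one non-dust
        // committed HTLC plus a non-dust candidate plus a fee spike buffer gives num_htlcs = 3 and
        // commit_tx_fee_msat = (724 + 3 * 172) * 2500 / 1000 * 1000 = 3_100_000 msat.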
3169
3170         /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
3171         /// pending HTLCs that are on track to be in their next commitment tx.
3172         ///
3173         /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
3174         /// `fee_spike_buffer_htlc` is `Some`.
3175         ///
3176         /// The first extra HTLC is useful for determining whether we can accept a further HTLC, the
3177         /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
3178         ///
3179         /// Dust HTLCs are excluded.
3180         fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
3181                 let context = &self;
3182                 assert!(!context.is_outbound());
3183
3184                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3185                         (0, 0)
3186                 } else {
3187                         (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
3188                                 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
3189                 };
3190                 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
3191                 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
3192
3193                 let mut addl_htlcs = 0;
3194                 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
3195                 match htlc.origin {
3196                         HTLCInitiator::LocalOffered => {
3197                                 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
3198                                         addl_htlcs += 1;
3199                                 }
3200                         },
3201                         HTLCInitiator::RemoteOffered => {
3202                                 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
3203                                         addl_htlcs += 1;
3204                                 }
3205                         }
3206                 }
3207
3208                 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
3209                 // non-dust inbound HTLCs are included (as all states imply they will be included), while
3210                 // outbound HTLCs are only included in a subset of states, see below.
3211                 let mut included_htlcs = 0;
3212                 for ref htlc in context.pending_inbound_htlcs.iter() {
3213                         if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
3214                                 continue
3215                         }
3216                         included_htlcs += 1;
3217                 }
3218
3219                 for ref htlc in context.pending_outbound_htlcs.iter() {
3220                         if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
3221                                 continue
3222                         }
3223                         // We only include outbound HTLCs if they will be present in the next commitment_signed
3224                         // we send, i.e. if we have not yet acknowledged their removal.
3225                         match htlc.state {
3226                                 OutboundHTLCState::Committed => included_htlcs += 1,
3227                                 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
3228                                 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
3229                                 _ => {},
3230                         }
3231                 }
3232
3233                 let num_htlcs = included_htlcs + addl_htlcs;
3234                 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
3235                 #[cfg(any(test, fuzzing))]
3236                 {
3237                         let mut fee = res;
3238                         if fee_spike_buffer_htlc.is_some() {
3239                                 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
3240                         }
3241                         let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
3242                         let commitment_tx_info = CommitmentTxInfoCached {
3243                                 fee,
3244                                 total_pending_htlcs,
3245                                 next_holder_htlc_id: match htlc.origin {
3246                                         HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
3247                                         HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
3248                                 },
3249                                 next_counterparty_htlc_id: match htlc.origin {
3250                                         HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
3251                                         HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
3252                                 },
3253                                 feerate: context.feerate_per_kw,
3254                         };
3255                         *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
3256                 }
3257                 res
3258         }
3259
3260         fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
3261                 match self.channel_state {
3262                         ChannelState::FundingNegotiated => f(),
3263                         ChannelState::AwaitingChannelReady(flags) =>
3264                                 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
3265                                         flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
3266                                 {
3267                                         f()
3268                                 } else {
3269                                         None
3270                                 },
3271                         _ => None,
3272                 }
3273         }
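        // For example, a channel sitting in AwaitingChannelReady with WAITING_FOR_BATCH still set
        // treats its funding transaction as unbroadcasted, so the getters below can return Some;
        // once neither that flag nor MONITOR_UPDATE_IN_PROGRESS applies, they return None.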
3274
3275         /// Returns the transaction if there is a pending funding transaction that is yet to be
3276         /// broadcast.
3277         pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
3278                 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
3279         }
3280
3281         /// Returns the transaction ID if there is a pending funding transaction that is yet to be
3282         /// broadcast.
3283         pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
3284                 self.if_unbroadcasted_funding(||
3285                         self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
3286                 )
3287         }
3288
3289         /// Returns whether the channel is funded in a batch.
3290         pub fn is_batch_funding(&self) -> bool {
3291                 self.is_batch_funding.is_some()
3292         }
3293
3294         /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
3295         /// broadcast.
3296         pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
3297                 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
3298         }
3299
3300         /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
3301         /// shutdown of this channel - no more calls into this Channel may be made afterwards except
3302         /// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
3303         /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
3304         /// immediately (others we will have to allow to time out).
3305         pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
3306                 // Note that we MUST only generate a monitor update that indicates force-closure - we're
3307                 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
3308                 // being fully configured in some cases. Thus, it's likely any monitor events we generate will
3309                 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
3310                 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
3311
3312                 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
3313                 // return them to fail the payment.
3314                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
3315                 let counterparty_node_id = self.get_counterparty_node_id();
3316                 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
3317                         match htlc_update {
3318                                 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
3319                                         dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
3320                                 },
3321                                 _ => {}
3322                         }
3323                 }
3324                 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
3325                 // If we haven't yet exchanged funding signatures (i.e. channel_state < AwaitingChannelReady),
3326                         // returning a channel monitor update here would imply a channel monitor update before
3327                         // we even registered the channel monitor to begin with, which is invalid.
3328                         // Thus, if we aren't actually at a point where we could conceivably broadcast the
3329                         // funding transaction, don't return a funding txo (which prevents providing the
3330                         // monitor update to the user, even if we return one).
3331                         // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
3332                         if !self.channel_state.is_pre_funded_state() {
3333                                 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
3334                                 Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
3335                                         update_id: self.latest_monitor_update_id,
3336                                         counterparty_node_id: Some(self.counterparty_node_id),
3337                                         updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
3338                                         channel_id: Some(self.channel_id()),
3339                                 }))
3340                         } else { None }
3341                 } else { None };
3342                 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
3343                 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
3344
3345                 self.channel_state = ChannelState::ShutdownComplete;
3346                 self.update_time_counter += 1;
3347                 ShutdownResult {
3348                         closure_reason,
3349                         monitor_update,
3350                         dropped_outbound_htlcs,
3351                         unbroadcasted_batch_funding_txid,
3352                         channel_id: self.channel_id,
3353                         user_channel_id: self.user_id,
3354                         channel_capacity_satoshis: self.channel_value_satoshis,
3355                         counterparty_node_id: self.counterparty_node_id,
3356                         unbroadcasted_funding_tx,
3357                         channel_funding_txo: self.get_funding_txo(),
3358                 }
3359         }
3360
3361         /// Only allowed after [`Self::channel_transaction_parameters`] is set.
3362         fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
3363                 let counterparty_keys = self.build_remote_transaction_keys();
3364                 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
3365
3366                 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
3367                 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
3368                 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
3369                         &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
3370
3371                 match &self.holder_signer {
3372                         // TODO (arik): move match into calling method for Taproot
3373                         ChannelSignerType::Ecdsa(ecdsa) => {
3374                                 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
3375                                         .map(|(signature, _)| msgs::FundingSigned {
3376                                                 channel_id: self.channel_id(),
3377                                                 signature,
3378                                                 #[cfg(taproot)]
3379                                                 partial_signature_with_nonce: None,
3380                                         })
3381                                         .ok();
3382
3383                                 if funding_signed.is_none() {
3384                                         #[cfg(not(async_signing))] {
3385                                                 panic!("Failed to get signature for funding_signed");
3386                                         }
3387                                         #[cfg(async_signing)] {
3388                                                 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
3389                                                 self.signer_pending_funding = true;
3390                                         }
3391                                 } else if self.signer_pending_funding {
3392                                         log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
3393                                         self.signer_pending_funding = false;
3394                                 }
3395
3396                                 // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
3397                                 (counterparty_initial_commitment_tx, funding_signed)
3398                         },
3399                         // TODO (taproot|arik)
3400                         #[cfg(taproot)]
3401                         _ => todo!()
3402                 }
3403         }
3404
3405         /// If we receive an error message when attempting to open a channel, it may only be a rejection
3406         /// of the channel type we tried, not of our ability to open any channel at all. We can see if a
3407         /// downgrade of channel features would be possible so that we can still open the channel.
3408         pub(crate) fn maybe_downgrade_channel_features<F: Deref>(
3409                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>
3410         ) -> Result<(), ()>
3411         where
3412                 F::Target: FeeEstimator
3413         {
3414                 if !self.is_outbound() ||
3415                         !matches!(
3416                                 self.channel_state, ChannelState::NegotiatingFunding(flags)
3417                                 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
3418                         )
3419                 {
3420                         return Err(());
3421                 }
3422                 if self.channel_type == ChannelTypeFeatures::only_static_remote_key() {
3423                         // We've exhausted our options
3424                         return Err(());
3425                 }
3426                 // We support opening a few different types of channels. Try removing our additional
3427                 // features one by one until we've either arrived at our default or the counterparty has
3428                 // accepted one.
3429                 //
3430                 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
3431                 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
3432                 // checks whether the counterparty supports every feature, this would only happen if the
3433                 // counterparty is advertising the feature, but rejecting channels proposing the feature for
3434                 // whatever reason.
3435                 if self.channel_type.supports_anchors_zero_fee_htlc_tx() {
3436                         self.channel_type.clear_anchors_zero_fee_htlc_tx();
3437                         self.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
3438                         assert!(!self.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
3439                 } else if self.channel_type.supports_scid_privacy() {
3440                         self.channel_type.clear_scid_privacy();
3441                 } else {
3442                         self.channel_type = ChannelTypeFeatures::only_static_remote_key();
3443                 }
3444                 self.channel_transaction_parameters.channel_type_features = self.channel_type.clone();
3445                 Ok(())
3446         }
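        // To make the retry ladder above concrete (an assumed sequence, not an exhaustive one): a
        // channel proposed with anchors_zero_fee_htlc_tx that gets an error back is retried
        // without anchors (re-estimating the feerate for a non-anchor commitment); a further
        // rejection of a channel still proposing scid_privacy clears that; and the final fallback
        // is only_static_remote_key, after which another error makes this method return Err(()).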
3447 }
3448
3449 // Internal utility functions for channels
3450
3451 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
3452 /// `channel_value_satoshis` in msat, set through
3453 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
3454 ///
3455 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
3456 ///
3457 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
3458 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
3459         let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
3460                 1
3461         } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
3462                 100
3463         } else {
3464                 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
3465         };
3466         channel_value_satoshis * 10 * configured_percent
3467 }
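// For example (values assumed), a 1_000_000 sat channel configured at 10% yields
// 1_000_000 * 10 * 10 = 100_000_000 msat, i.e. 100_000 sat: the `* 10` folds together the
// sat-to-msat factor of 1000 and the division of the percentage by 100.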
3468
3469 /// Returns a minimum channel reserve value the remote needs to maintain,
3470 /// required by us according to the configured or default
3471 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
3472 ///
3473 /// Guaranteed to return a value no larger than channel_value_satoshis
3474 ///
3475 /// This is used both for outbound and inbound channels and has lower bound
3476 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
3477 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
3478         let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
3479         cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
3480 }
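// For example (values assumed, with MIN_THEIR_CHAN_RESERVE_SATOSHIS taken to be 1000 sat): at
// the 1% default (10_000 millionths), a 1_000_000 sat channel requires a 10_000 sat reserve,
// while a 50_000 sat channel computes 500 sat and is floored to the 1000 sat minimum.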
3481
3482 /// This is for legacy reasons, present for forward-compatibility.
3483 /// LDK versions older than 0.0.104 don't know how to read/handle values other than default
3484 /// from storage. Hence, we use this function to not persist default values of
3485 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
3486 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
3487         let (q, _) = channel_value_satoshis.overflowing_div(100);
3488         cmp::min(channel_value_satoshis, cmp::max(q, 1000))
3489 }
3490
3491 /// Returns a minimum channel reserve value each party needs to maintain, fixed in the spec to a
3492 /// default of 1% of the total channel value.
3493 ///
3494 /// Guaranteed to return a value no larger than channel_value_satoshis
3495 ///
3496 /// This is used both for outbound and inbound channels and has lower bound
3497 /// of `dust_limit_satoshis`.
3498 #[cfg(any(dual_funding, splicing))]
3499 fn get_v2_channel_reserve_satoshis(channel_value_satoshis: u64, dust_limit_satoshis: u64) -> u64 {
3500         // Fixed at 1% of channel value by spec.
3501         let (q, _) = channel_value_satoshis.overflowing_div(100);
3502         cmp::min(channel_value_satoshis, cmp::max(q, dust_limit_satoshis))
3503 }
3504
3505 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
3506 // Note that num_htlcs should not include dust HTLCs.
3507 #[inline]
3508 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3509         feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
3510 }
3511
3512 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
3513 // Note that num_htlcs should not include dust HTLCs.
3514 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3515         // Note that we need to divide before multiplying to round properly,
3516         // since the lowest denomination of bitcoin on-chain is the satoshi.
3517         (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
3518 }
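// To see the rounding (constants assumed: 724 WU base + 172 WU per HTLC, non-anchors): at
// feerate_per_kw = 2501 with one non-dust HTLC, 896 * 2501 = 2_240_896, which truncates to
// 2240 sat and is then scaled back to 2_240_000 msat, never a fraction of a satoshi.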
3519
3520 /// Context for dual-funded channels.
3521 #[cfg(any(dual_funding, splicing))]
3522 pub(super) struct DualFundingChannelContext {
3523         /// The amount in satoshis we will be contributing to the channel.
3524         pub our_funding_satoshis: u64,
3525         /// The amount in satoshis our counterparty will be contributing to the channel.
3526         pub their_funding_satoshis: u64,
3527         /// The funding transaction locktime suggested by the initiator. If set by us, it is always set
3528         /// to the current block height to align incentives against fee-sniping.
3529         pub funding_tx_locktime: u32,
3530         /// The feerate set by the initiator to be used for the funding transaction.
3531         pub funding_feerate_sat_per_1000_weight: u32,
3532 }
3533
3534 // Holder designates channel data owned for the benefit of the user client.
3535 // Counterparty designates channel data owned by the other channel participant.
3536 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
3537         pub context: ChannelContext<SP>,
3538         #[cfg(any(dual_funding, splicing))]
3539         pub dual_funding_channel_context: Option<DualFundingChannelContext>,
3540 }
3541
3542 #[cfg(any(test, fuzzing))]
3543 struct CommitmentTxInfoCached {
3544         fee: u64,
3545         total_pending_htlcs: usize,
3546         next_holder_htlc_id: u64,
3547         next_counterparty_htlc_id: u64,
3548         feerate: u32,
3549 }
3550
3551 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
3552 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
3553 trait FailHTLCContents {
3554         type Message: FailHTLCMessageName;
3555         fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
3556         fn to_inbound_htlc_state(self) -> InboundHTLCState;
3557         fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
3558 }
3559 impl FailHTLCContents for msgs::OnionErrorPacket {
3560         type Message = msgs::UpdateFailHTLC;
3561         fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3562                 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
3563         }
3564         fn to_inbound_htlc_state(self) -> InboundHTLCState {
3565                 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
3566         }
3567         fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3568                 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
3569         }
3570 }
3571 impl FailHTLCContents for ([u8; 32], u16) {
3572         type Message = msgs::UpdateFailMalformedHTLC;
3573         fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3574                 msgs::UpdateFailMalformedHTLC {
3575                         htlc_id,
3576                         channel_id,
3577                         sha256_of_onion: self.0,
3578                         failure_code: self.1
3579                 }
3580         }
3581         fn to_inbound_htlc_state(self) -> InboundHTLCState {
3582                 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
3583         }
3584         fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3585                 HTLCUpdateAwaitingACK::FailMalformedHTLC {
3586                         htlc_id,
3587                         sha256_of_onion: self.0,
3588                         failure_code: self.1
3589                 }
3590         }
3591 }
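// As an illustration (values hypothetical): ([0u8; 32], 0x8000u16).to_message(7, channel_id)
// yields an msgs::UpdateFailMalformedHTLC for HTLC 7, while an msgs::OnionErrorPacket routed
// through the same trait produces an msgs::UpdateFailHTLC, so callers such as
// Channel::fail_htlc can stay generic over both failure flavors.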
3592
3593 trait FailHTLCMessageName {
3594         fn name() -> &'static str;
3595 }
3596 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
3597         fn name() -> &'static str {
3598                 "update_fail_htlc"
3599         }
3600 }
3601 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
3602         fn name() -> &'static str {
3603                 "update_fail_malformed_htlc"
3604         }
3605 }
3606
3607 impl<SP: Deref> Channel<SP> where
3608         SP::Target: SignerProvider,
3609         <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
3610 {
3611         fn check_remote_fee<F: Deref, L: Deref>(
3612                 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
3613                 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
3614         ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
3615         {
3616                 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
3617                         ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
3618                 } else {
3619                         ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
3620                 };
3621                 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
3622                 if feerate_per_kw < lower_limit {
3623                         if let Some(cur_feerate) = cur_feerate_per_kw {
3624                                 if feerate_per_kw > cur_feerate {
3625                                         log_warn!(logger,
3626                                                 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
3627                                                 cur_feerate, feerate_per_kw);
3628                                         return Ok(());
3629                                 }
3630                         }
3631                         return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
3632                 }
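                // Sketch of the branch above (limits assumed): if the estimator's lower limit is
                // 500 sat/kW, a peer update to 400 sat/kW is rejected when our current feerate is
                // 450 (no improvement), but accepted with a warning when our current feerate is
                // 300, as refusing a feerate above what we already committed to would only make
                // closing harder.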
3633                 Ok(())
3634         }
3635
3636         #[inline]
3637         fn get_closing_scriptpubkey(&self) -> ScriptBuf {
3638                 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
3639                 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
3640                 // outside of those situations will fail.
3641                 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
3642         }
3643
3644         #[inline]
3645         fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
3646                 let mut ret =
3647                 (4 +                                                   // version
3648                  1 +                                                   // input count
3649                  36 +                                                  // prevout
3650                  1 +                                                   // script length (0)
3651                  4 +                                                   // sequence
3652                  1 +                                                   // output count
3653                  4                                                     // lock time
3654                  )*4 +                                                 // * 4 for non-witness parts
3655                 2 +                                                    // witness marker and flag
3656                 1 +                                                    // witness element count
3657                 4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
3658                 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
3659                 2*(1 + 71);                                            // two signatures + sighash type flags
3660                 if let Some(spk) = a_scriptpubkey {
3661                         ret += ((8+1) +                                    // output values and script length
3662                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
3663                 }
3664                 if let Some(spk) = b_scriptpubkey {
3665                         ret += ((8+1) +                                    // output values and script length
3666                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
3667                 }
3668                 ret
3669         }
3670
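        /// Builds a [`ClosingTransaction`] paying the given total fee out of the funder's balance,
        /// zeroing out either output if it would be dust from our perspective. Returns the
        /// transaction along with the total fee actually paid.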
3671         #[inline]
3672         fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
3673                 assert!(self.context.pending_inbound_htlcs.is_empty());
3674                 assert!(self.context.pending_outbound_htlcs.is_empty());
3675                 assert!(self.context.pending_update_fee.is_none());
3676
3677                 let mut total_fee_satoshis = proposed_total_fee_satoshis;
3678                 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
3679                 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
3680
3681                 if value_to_holder < 0 {
3682                         assert!(self.context.is_outbound());
3683                         total_fee_satoshis += (-value_to_holder) as u64;
3684                 } else if value_to_counterparty < 0 {
3685                         assert!(!self.context.is_outbound());
3686                         total_fee_satoshis += (-value_to_counterparty) as u64;
3687                 }
3688
3689                 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
3690                         value_to_counterparty = 0;
3691                 }
3692
3693                 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
3694                         value_to_holder = 0;
3695                 }
3696
3697                 assert!(self.context.shutdown_scriptpubkey.is_some());
3698                 let holder_shutdown_script = self.get_closing_scriptpubkey();
3699                 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
3700                 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
3701
3702                 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
3703                 (closing_transaction, total_fee_satoshis)
3704         }
3705
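        /// Returns the channel's funding outpoint; panics if the funding transaction has not yet
        /// been set.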
3706         fn funding_outpoint(&self) -> OutPoint {
3707                 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
3708         }
3709
3710         /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
3711         /// entirely.
3712         ///
3713         /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
3714         /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
3715         ///
3716         /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
3717         /// disconnected).
3718         pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
3719                 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
3720         where L::Target: Logger {
3721                 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
3722                 // (see equivalent if condition there).
3723                 assert!(!self.context.channel_state.can_generate_new_commitment());
3724                 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
3725                 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
3726                 self.context.latest_monitor_update_id = mon_update_id;
3727                 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
3728                         assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
3729                 }
3730         }
3731
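        /// Attempts to claim a pending inbound HTLC with the given preimage, generating a
        /// [`ChannelMonitorUpdate`] containing the preimage and, if we can currently generate a new
        /// commitment, an [`msgs::UpdateFulfillHTLC`] to send. Otherwise the claim is placed in the
        /// holding cell to be sent once we can.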
3732         fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
3733                 // Either ChannelReady got set (which means it won't be unset) or there is no way any
3734         // caller thought we could have something claimed (because we wouldn't have accepted an
3735                 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
3736                 // either.
3737                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3738                         panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
3739                 }
3740
3741                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3742         // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
3743                 // these, but for now we just have to treat them as normal.
3744
3745                 let mut pending_idx = core::usize::MAX;
3746                 let mut htlc_value_msat = 0;
3747                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3748                         if htlc.htlc_id == htlc_id_arg {
3749                                 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
3750                                 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
3751                                         htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
3752                                 match htlc.state {
3753                                         InboundHTLCState::Committed => {},
3754                                         InboundHTLCState::LocalRemoved(ref reason) => {
3755                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3756                                                 } else {
3757                                                         log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
3758                                                         debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3759                                                 }
3760                                                 return UpdateFulfillFetch::DuplicateClaim {};
3761                                         },
3762                                         _ => {
3763                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3764                                                 // Don't return in release mode here so that we can update channel_monitor
3765                                         }
3766                                 }
3767                                 pending_idx = idx;
3768                                 htlc_value_msat = htlc.amount_msat;
3769                                 break;
3770                         }
3771                 }
3772                 if pending_idx == core::usize::MAX {
3773                         #[cfg(any(test, fuzzing))]
3774                         // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
3775                         // this is simply a duplicate claim, not previously failed and we lost funds.
3776                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3777                         return UpdateFulfillFetch::DuplicateClaim {};
3778                 }
3779
3780                 // Now update local state:
3781                 //
3782                 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
3783                 // can claim it even if the channel hits the chain before we see their next commitment.
3784                 self.context.latest_monitor_update_id += 1;
3785                 let monitor_update = ChannelMonitorUpdate {
3786                         update_id: self.context.latest_monitor_update_id,
3787                         counterparty_node_id: Some(self.context.counterparty_node_id),
3788                         updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
3789                                 payment_preimage: payment_preimage_arg.clone(),
3790                         }],
3791                         channel_id: Some(self.context.channel_id()),
3792                 };
3793
3794                 if !self.context.channel_state.can_generate_new_commitment() {
3795                         // Note that this condition is the same as the assertion in
3796                         // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
3797                         // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
3798                         // do not get into this branch.
3799                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
3800                                 match pending_update {
3801                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
3802                                                 if htlc_id_arg == htlc_id {
3803                                                         // Make sure we don't leave latest_monitor_update_id incremented here:
3804                                                         self.context.latest_monitor_update_id -= 1;
3805                                                         #[cfg(any(test, fuzzing))]
3806                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3807                                                         return UpdateFulfillFetch::DuplicateClaim {};
3808                                                 }
3809                                         },
3810                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
3811                                                 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
3812                                         {
3813                                                 if htlc_id_arg == htlc_id {
3814                                                         log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
3815                                         // TODO: We may actually be able to switch to a fulfill here, though it's
3816                                                         // rare enough it may not be worth the complexity burden.
3817                                                         debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3818                                                         return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3819                                                 }
3820                                         },
3821                                         _ => {}
3822                                 }
3823                         }
3824                         log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
3825                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
3826                                 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
3827                         });
3828                         #[cfg(any(test, fuzzing))]
3829                         self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3830                         return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3831                 }
3832                 #[cfg(any(test, fuzzing))]
3833                 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3834
3835                 {
3836                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
3837                         if let InboundHTLCState::Committed = htlc.state {
3838                         } else {
3839                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3840                                 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3841                         }
3842                         log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
3843                         htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
3844                 }
3845
3846                 UpdateFulfillFetch::NewClaim {
3847                         monitor_update,
3848                         htlc_value_msat,
3849                         msg: Some(msgs::UpdateFulfillHTLC {
3850                                 channel_id: self.context.channel_id(),
3851                                 htlc_id: htlc_id_arg,
3852                                 payment_preimage: payment_preimage_arg,
3853                         }),
3854                 }
3855         }
3856
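        /// Claims an HTLC as [`Self::get_update_fulfill_htlc`] does, additionally building a new
        /// commitment update and merging it into the returned [`ChannelMonitorUpdate`] when a
        /// fulfill message was generated.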
3857         pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
3858                 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
3859                 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
3860                         UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
3861                                 // Even if we aren't supposed to let new monitor updates with commitment state
3862                                 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
3863                                 // matter what. Sadly, to push a new monitor update which flies before others
3864                                 // already queued, we have to insert it into the pending queue and update the
3865                                 // update_ids of all the following monitors.
3866                                 if release_cs_monitor && msg.is_some() {
3867                                         let mut additional_update = self.build_commitment_no_status_check(logger);
3868                                         // build_commitment_no_status_check may bump latest_monitor_update_id but we want them
3869                                         // to be strictly increasing by one, so decrement it here.
3870                                         self.context.latest_monitor_update_id = monitor_update.update_id;
3871                                         monitor_update.updates.append(&mut additional_update.updates);
3872                                 } else {
3873                                         let new_mon_id = self.context.blocked_monitor_updates.get(0)
3874                                                 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
3875                                         monitor_update.update_id = new_mon_id;
3876                                         for held_update in self.context.blocked_monitor_updates.iter_mut() {
3877                                                 held_update.update.update_id += 1;
3878                                         }
3879                                         if msg.is_some() {
3880                                                 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
3881                                                 let update = self.build_commitment_no_status_check(logger);
3882                                                 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3883                                                         update,
3884                                                 });
3885                                         }
3886                                 }
3887
3888                                 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
3889                                 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
3890                         },
3891                         UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
3892                 }
3893         }
3894
3895         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3896         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3897         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3898         /// before we fail backwards.
3899         ///
3900         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(())`. Thus, this will always
3901         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
3902         /// [`ChannelError::Ignore`].
3903         pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
3904         -> Result<(), ChannelError> where L::Target: Logger {
3905                 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
3906                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
3907         }
3908
3909         /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
3910         /// want to fail blinded HTLCs where we are not the intro node.
3911         ///
3912         /// See [`Self::queue_fail_htlc`] for more info.
3913         pub fn queue_fail_malformed_htlc<L: Deref>(
3914                 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
3915         ) -> Result<(), ChannelError> where L::Target: Logger {
3916                 self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
3917                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
3918         }
3919
3920         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3921         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3922         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3923         /// before we fail backwards.
3924         ///
3925         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
3926         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
3927         /// [`ChannelError::Ignore`].
3928         fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
3929                 &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
3930                 logger: &L
3931         ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
3932                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3933                         panic!("Was asked to fail an HTLC when channel was not in an operational state");
3934                 }
3935
3936                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3937         // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
3938                 // these, but for now we just have to treat them as normal.
3939
3940                 let mut pending_idx = core::usize::MAX;
3941                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3942                         if htlc.htlc_id == htlc_id_arg {
3943                                 match htlc.state {
3944                                         InboundHTLCState::Committed => {},
3945                                         InboundHTLCState::LocalRemoved(ref reason) => {
3946                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3947                                                 } else {
3948                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
3949                                                 }
3950                                                 return Ok(None);
3951                                         },
3952                                         _ => {
3953                                                 debug_assert!(false, "Have an inbound HTLC we tried to fail before it was fully committed to");
3954                                                 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
3955                                         }
3956                                 }
3957                                 pending_idx = idx;
3958                         }
3959                 }
3960                 if pending_idx == core::usize::MAX {
3961                         #[cfg(any(test, fuzzing))]
3962                         // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
3963                         // is simply a duplicate fail, not previously failed and we failed-back too early.
3964                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3965                         return Ok(None);
3966                 }
3967
3968                 if !self.context.channel_state.can_generate_new_commitment() {
3969                         debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
3970                         force_holding_cell = true;
3971                 }
3972
3973                 // Now update local state:
3974                 if force_holding_cell {
3975                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
3976                                 match pending_update {
3977                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
3978                                                 if htlc_id_arg == htlc_id {
3979                                                         #[cfg(any(test, fuzzing))]
3980                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3981                                                         return Ok(None);
3982                                                 }
3983                                         },
3984                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
3985                                                 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
3986                                         {
3987                                                 if htlc_id_arg == htlc_id {
3988                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
3989                                                         return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
3990                                                 }
3991                                         },
3992                                         _ => {}
3993                                 }
3994                         }
3995                         log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
3996                         self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
3997                         return Ok(None);
3998                 }
3999
4000                 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
4001                         E::Message::name(), &self.context.channel_id());
4002                 {
4003                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
4004                         htlc.state = err_contents.clone().to_inbound_htlc_state();
4005                 }
4006
4007                 Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
4008         }
4009
4010         // Message handlers:
4011         /// Updates the state of the channel to indicate that all channels in the batch have received
4012         /// funding_signed and persisted their monitors.
4013         /// The funding transaction is consequently allowed to be broadcast, and the channel can be
4014         /// treated as a non-batch channel going forward.
4015         pub fn set_batch_ready(&mut self) {
4016                 self.context.is_batch_funding = None;
4017                 self.context.channel_state.clear_waiting_for_batch();
4018         }
4019
4020         /// Unsets the existing funding information.
4021         ///
4022         /// This must only be used if the channel has not yet completed funding and has not been used.
4023         ///
4024         /// Further, the channel must be immediately shut down after this with a call to
4025         /// [`ChannelContext::force_shutdown`].
4026         pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
4027                 debug_assert!(matches!(
4028                         self.context.channel_state, ChannelState::AwaitingChannelReady(_)
4029                 ));
4030                 self.context.channel_transaction_parameters.funding_outpoint = None;
4031                 self.context.channel_id = temporary_channel_id;
4032         }
4033
4034         /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
4035         /// and the channel is now usable (and public), this may generate an announcement_signatures to
4036         /// reply with.
4037         pub fn channel_ready<NS: Deref, L: Deref>(
4038                 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
4039                 user_config: &UserConfig, best_block: &BestBlock, logger: &L
4040         ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
4041         where
4042                 NS::Target: NodeSigner,
4043                 L::Target: Logger
4044         {
4045                 if self.context.channel_state.is_peer_disconnected() {
4046                         self.context.workaround_lnd_bug_4006 = Some(msg.clone());
4047                         return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
4048                 }
4049
4050                 if let Some(scid_alias) = msg.short_channel_id_alias {
4051                         if Some(scid_alias) != self.context.short_channel_id {
4052                                 // The scid alias provided can be used to route payments *from* our counterparty,
4053                                 // i.e. can be used for inbound payments and provided in invoices, but is not used
4054                                 // when routing outbound payments.
4055                                 self.context.latest_inbound_scid_alias = Some(scid_alias);
4056                         }
4057                 }
4058
4059                 // Our channel_ready shouldn't have been sent if we are waiting for other channels in the
4060                 // batch, but we can receive channel_ready messages.
4061                 let mut check_reconnection = false;
4062                 match &self.context.channel_state {
4063                         ChannelState::AwaitingChannelReady(flags) => {
4064                                 let flags = flags.clone().clear(FundedStateFlags::ALL.into());
4065                                 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
4066                                 if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
4067                                         // If we reconnected before sending our `channel_ready` they may still resend theirs.
4068                                         check_reconnection = true;
4069                                 } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
4070                                         self.context.channel_state.set_their_channel_ready();
4071                                 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
4072                                         self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
4073                                         self.context.update_time_counter += 1;
4074                                 } else {
4075                                         // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
4076                                         debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
4077                                 }
4078                         }
4079                         // If we reconnected before sending our `channel_ready` they may still resend theirs.
4080                         ChannelState::ChannelReady(_) => check_reconnection = true,
4081                         _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
4082                 }
4083                 if check_reconnection {
4084                         // They probably disconnected/reconnected and re-sent the channel_ready, which is
4085                         // required, or they're sending a fresh SCID alias.
4086                         let expected_point =
4087                                 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
4088                                         // If they haven't ever sent an updated point, the point they send should match
4089                                         // the current one.
4090                                         self.context.counterparty_cur_commitment_point
4091                                 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
4092                                         // If we've advanced the commitment number once, the second commitment point is
4093                                         // at `counterparty_prev_commitment_point`, which is not yet revoked.
4094                                         debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
4095                                         self.context.counterparty_prev_commitment_point
4096                                 } else {
4097                                         // If they have sent updated points, channel_ready is always supposed to match
4098                                         // their "first" point, which we re-derive here.
4099                                         Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
4100                                                         &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
4101                                                 ).expect("We already advanced, so previous secret keys should have been validated already")))
4102                                 };
4103                         if expected_point != Some(msg.next_per_commitment_point) {
4104                                 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
4105                         }
4106                         return Ok(None);
4107                 }
4108
4109                 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4110                 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4111
4112                 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
4113
4114                 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger))
4115         }
4116
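        /// Handles an incoming [`msgs::UpdateAddHTLC`], validating it against our channel state
        /// and limits (max accepted HTLCs, max in-flight value, reserve and fee requirements)
        /// before adding it to our set of pending inbound HTLCs.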
4117         pub fn update_add_htlc(
4118                 &mut self, msg: &msgs::UpdateAddHTLC, pending_forward_status: PendingHTLCStatus,
4119         ) -> Result<(), ChannelError> {
4120                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4121                         return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
4122                 }
4123                 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
4124                 if self.context.channel_state.is_remote_shutdown_sent() {
4125                         return Err(ChannelError::Close("Got add HTLC message when the remote had already sent shutdown".to_owned()));
4126                 }
4127                 if self.context.channel_state.is_peer_disconnected() {
4128                         return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
4129                 }
4130                 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
4131                         return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
4132                 }
4133                 if msg.amount_msat == 0 {
4134                         return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
4135                 }
4136                 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
4137                         return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
4138                 }
4139
4140                 let htlc_stats = self.context.get_pending_htlc_stats(None);
4141                 if htlc_stats.pending_inbound_htlcs + 1 > self.context.holder_max_accepted_htlcs as usize {
4142                         return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
4143                 }
4144                 if htlc_stats.pending_inbound_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
4145                         return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
4146                 }
4147
4148                 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at least meet
4149                 // the reserve_satoshis we told them to always keep as a direct payment, so that they lose
4150                 // something if we punish them for broadcasting an old state).
4151                 // Note that we don't really care about having a small/no to_remote output in our local
4152                 // commitment transactions, as the purpose of the channel reserve is to ensure we can
4153                 // punish *them* if they misbehave, so we discount any outbound HTLCs which will not be
4154                 // present in the next commitment transaction we send them (at least for fulfilled ones,
4155                 // failed ones won't modify value_to_self).
4156                 // Note that we will send HTLCs which another instance of rust-lightning would think
4157                 // violate the reserve value if we do not do this (as we forget inbound HTLCs from the
4158                 // Channel state once they will not be present in the next received commitment
4159                 // transaction).
4160                 let mut removed_outbound_total_msat = 0;
4161                 for ref htlc in self.context.pending_outbound_htlcs.iter() {
4162                         if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
4163                                 removed_outbound_total_msat += htlc.amount_msat;
4164                         } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
4165                                 removed_outbound_total_msat += htlc.amount_msat;
4166                         }
4167                 }
4168
4169                 let pending_value_to_self_msat =
4170                         self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat;
4171                 let pending_remote_value_msat =
4172                         self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
4173                 if pending_remote_value_msat < msg.amount_msat {
4174                         return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
4175                 }
4176
4177                 // Check that the remote can afford to pay for this HTLC on-chain at the current
4178                 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
4179                 {
4180                         let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
4181                                 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
4182                                 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
4183                         };
4184                         let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4185                                 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
4186                         } else {
4187                                 0
4188                         };
4189                         if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
4190                                 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
4191                         };
4192                         if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
4193                                 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
4194                         }
4195                 }
4196
4197                 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
4198                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
4199                 } else {
4200                         0
4201                 };
4202                 if self.context.is_outbound() {
4203                         // Check that they won't violate our local required channel reserve by adding this HTLC.
4204                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
4205                         let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
4206                         if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
4207                                 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
4208                         }
4209                 }
4210                 if self.context.next_counterparty_htlc_id != msg.htlc_id {
4211                         return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
4212                 }
4213                 if msg.cltv_expiry >= 500000000 {
4214                         return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
4215                 }
4216
4217                 if self.context.channel_state.is_local_shutdown_sent() {
4218                         if let PendingHTLCStatus::Forward(_) = pending_forward_status {
4219                                 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
4220                         }
4221                 }
4222
4223                 // Now update local state:
4224                 self.context.next_counterparty_htlc_id += 1;
4225                 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
4226                         htlc_id: msg.htlc_id,
4227                         amount_msat: msg.amount_msat,
4228                         payment_hash: msg.payment_hash,
4229                         cltv_expiry: msg.cltv_expiry,
4230                         state: InboundHTLCState::RemoteAnnounced(InboundHTLCResolution::Resolved {
4231                                 pending_htlc_status: pending_forward_status
4232                         }),
4233                 });
4234                 Ok(())
4235         }
4236
4237         /// Marks an outbound HTLC as removed upon receiving the corresponding update_fail/fulfill/malformed message
4238         #[inline]
4239         fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
4240                 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
4241                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4242                         if htlc.htlc_id == htlc_id {
4243                                 let outcome = match check_preimage {
4244                                         None => fail_reason.into(),
4245                                         Some(payment_preimage) => {
4246                                                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
4247                                                 if payment_hash != htlc.payment_hash {
4248                                                         return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
4249                                                 }
4250                                                 OutboundHTLCOutcome::Success(Some(payment_preimage))
4251                                         }
4252                                 };
4253                                 match htlc.state {
4254                                         OutboundHTLCState::LocalAnnounced(_) =>
4255                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
4256                                         OutboundHTLCState::Committed => {
4257                                                 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
4258                                         },
4259                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
4260                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
4261                                 }
4262                                 return Ok(htlc);
4263                         }
4264                 }
4265                 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
4266         }
4267
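        /// Handles an incoming [`msgs::UpdateFulfillHTLC`], returning the corresponding HTLC's
        /// source, amount, and any fee we skimmed when forwarding it.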
4268         pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
4269                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4270                         return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
4271                 }
4272                 if self.context.channel_state.is_peer_disconnected() {
4273                         return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
4274                 }
4275
4276                 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
4277         }
4278
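        /// Handles an incoming [`msgs::UpdateFailHTLC`], marking the corresponding outbound HTLC
        /// as removed with the given failure reason.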
4279         pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
4280                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4281                         return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
4282                 }
4283                 if self.context.channel_state.is_peer_disconnected() {
4284                         return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
4285                 }
4286
4287                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
4288                 Ok(())
4289         }
4290
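        /// Handles an incoming [`msgs::UpdateFailMalformedHTLC`], marking the corresponding
        /// outbound HTLC as removed with the given failure reason.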
4291         pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
4292                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4293                         return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
4294                 }
4295                 if self.context.channel_state.is_peer_disconnected() {
4296                         return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
4297                 }
4298
4299                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
4300                 Ok(())
4301         }
4302
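        /// Handles an incoming [`msgs::CommitmentSigned`], verifying the signatures on our new
        /// commitment transaction and each HTLC transaction before updating HTLC and fee state
        /// and building the corresponding [`ChannelMonitorUpdate`].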
4303         pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
4304                 where L::Target: Logger
4305         {
4306                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4307                         return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
4308                 }
4309                 if self.context.channel_state.is_peer_disconnected() {
4310                         return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
4311                 }
4312                 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
4313                         return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
4314                 }
4315
4316                 let funding_script = self.context.get_funding_redeemscript();
4317
4318                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4319
4320                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
4321                 let commitment_txid = {
4322                         let trusted_tx = commitment_stats.tx.trust();
4323                         let bitcoin_tx = trusted_tx.built_transaction();
4324                         let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
4325
4326                         log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
4327                                 log_bytes!(msg.signature.serialize_compact()[..]),
4328                                 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
4329                                 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
4330                         if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
4331                                 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
4332                         }
4333                         bitcoin_tx.txid
4334                 };
4335                 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
4336
4337                 // If our counterparty updated the channel fee in this commitment transaction, check that
4338                 // they can actually afford the new fee now.
4339                 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
4340                         update_state == FeeUpdateState::RemoteAnnounced
4341                 } else { false };
4342                 if update_fee {
4343                         debug_assert!(!self.context.is_outbound());
4344                         let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
4345                         if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
4346                                 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
4347                         }
4348                 }
4349                 #[cfg(any(test, fuzzing))]
4350                 {
4351                         if self.context.is_outbound() {
4352                                 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
4353                                 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
4354                                 if let Some(info) = projected_commit_tx_info {
4355                                         let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
4356                                                 + self.context.holding_cell_htlc_updates.len();
4357                                         if info.total_pending_htlcs == total_pending_htlcs
4358                                                 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
4359                                                 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
4360                                                 && info.feerate == self.context.feerate_per_kw {
4361                                                         assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
4362                                                 }
4363                                 }
4364                         }
4365                 }
4366
4367                 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
4368                         return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
4369                 }
4370
4371                 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
4372                 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
4373                 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
4374                 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
4375                 // backwards compatibility, we never use it in production. To provide test coverage, here,
4376                 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
4377                 #[allow(unused_assignments, unused_mut)]
4378                 let mut separate_nondust_htlc_sources = false;
4379                 #[cfg(all(feature = "std", any(test, fuzzing)))] {
4380                         use core::hash::{BuildHasher, Hasher};
4381                         // Get a random value using the only std API to do so - the DefaultHasher
4382                         let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
4383                         separate_nondust_htlc_sources = rand_val % 2 == 0;
4384                 }
4385
4386                 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
4387                 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
4388                 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
4389                         if let Some(_) = htlc.transaction_output_index {
4390                                 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
4391                                         self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
4392                                         &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
4393
4394                                 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
4395                                 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
4396                                 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
4397                                 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
4398                                         log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
4399                                         encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
4400                                 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
4401                                         return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
4402                                 }
4403                                 if !separate_nondust_htlc_sources {
4404                                         htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
4405                                 }
4406                         } else {
4407                                 htlcs_and_sigs.push((htlc, None, source_opt.take()));
4408                         }
4409                         if separate_nondust_htlc_sources {
4410                                 if let Some(source) = source_opt.take() {
4411                                         nondust_htlc_sources.push(source);
4412                                 }
4413                         }
4414                         debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
4415                 }
4416
4417                 let holder_commitment_tx = HolderCommitmentTransaction::new(
4418                         commitment_stats.tx,
4419                         msg.signature,
4420                         msg.htlc_signatures.clone(),
4421                         &self.context.get_holder_pubkeys().funding_pubkey,
4422                         self.context.counterparty_funding_pubkey()
4423                 );
4424
4425                 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
4426                         .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
4427
4428                 // Update state now that we've passed all the can-fail calls...
4429                 let mut need_commitment = false;
4430                 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
4431                         if *update_state == FeeUpdateState::RemoteAnnounced {
4432                                 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
4433                                 need_commitment = true;
4434                         }
4435                 }
4436
4437                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
4438                         let htlc_resolution = if let &InboundHTLCState::RemoteAnnounced(ref resolution) = &htlc.state {
4439                                 Some(resolution.clone())
4440                         } else { None };
4441                         if let Some(htlc_resolution) = htlc_resolution {
4442                                 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
4443                                         &htlc.payment_hash, &self.context.channel_id);
4444                                 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(htlc_resolution);
4445                                 need_commitment = true;
4446                         }
4447                 }
4448                 let mut claimed_htlcs = Vec::new();
4449                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4450                         if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
4451                                 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
4452                                         &htlc.payment_hash, &self.context.channel_id);
4453                                 // Grab the preimage, if it exists, instead of cloning
4454                                 let mut reason = OutboundHTLCOutcome::Success(None);
4455                                 mem::swap(outcome, &mut reason);
4456                                 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
4457                                         // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
4458                                         // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
4459                                         // have a `Success(None)` reason. In this case we could forget some HTLC
4460                                         // claims, but such an upgrade is unlikely and including claimed HTLCs here
4461                                         // fixes a bug which the user was exposed to on 0.0.104 when they started the
4462                                         // claim anyway.
4463                                         claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
4464                                 }
4465                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
4466                                 need_commitment = true;
4467                         }
4468                 }
4469
4470                 self.context.latest_monitor_update_id += 1;
4471                 let mut monitor_update = ChannelMonitorUpdate {
4472                         update_id: self.context.latest_monitor_update_id,
4473                         counterparty_node_id: Some(self.context.counterparty_node_id),
4474                         updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
4475                                 commitment_tx: holder_commitment_tx,
4476                                 htlc_outputs: htlcs_and_sigs,
4477                                 claimed_htlcs,
4478                                 nondust_htlc_sources,
4479                         }],
4480                         channel_id: Some(self.context.channel_id()),
4481                 };
4482
4483                 self.context.cur_holder_commitment_transaction_number -= 1;
4484                 self.context.expecting_peer_commitment_signed = false;
4485                 // Note that if `need_commitment` is set and we're not AwaitingRemoteRevoke we'll call
4486                 // build_commitment_no_status_check() next, which will set this back to RevokeAndACKFirst.
4487                 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
4488
4489                 if self.context.channel_state.is_monitor_update_in_progress() {
4490                         // In case we initially failed monitor updating without requiring a response, we need
4491                         // to make sure the RAA gets sent first.
4492                         self.context.monitor_pending_revoke_and_ack = true;
4493                         if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4494                                 // If we were going to send a commitment_signed after the RAA, go ahead and do all
4495                                 // the corresponding HTLC status updates so that
4496                                 // get_last_commitment_update_for_send includes the right HTLCs.
4497                                 self.context.monitor_pending_commitment_signed = true;
4498                                 let mut additional_update = self.build_commitment_no_status_check(logger);
4499                                 // build_commitment_no_status_check may bump latest_monitor_update_id but we want
4500                                 // update_ids to be strictly increasing by one, so reset it here.
4501                                 self.context.latest_monitor_update_id = monitor_update.update_id;
4502                                 monitor_update.updates.append(&mut additional_update.updates);
4503                         }
4504                         log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
4505                                 &self.context.channel_id);
4506                         return Ok(self.push_ret_blockable_mon_update(monitor_update));
4507                 }
4508
4509                 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4510                         // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
4511                         // we'll send one right away when we get the revoke_and_ack when we
4512                         // free_holding_cell_htlcs().
4513                         let mut additional_update = self.build_commitment_no_status_check(logger);
4514                         // build_commitment_no_status_check may bump latest_monitor_update_id but we want
4515                         // update_ids to be strictly increasing by one, so reset it here.
4516                         self.context.latest_monitor_update_id = monitor_update.update_id;
4517                         monitor_update.updates.append(&mut additional_update.updates);
4518                         true
4519                 } else { false };
4520
4521                 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
4522                         &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
4523                 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
4524                 return Ok(self.push_ret_blockable_mon_update(monitor_update));
4525         }
4526
4527         /// Public version of `free_holding_cell_htlcs` below, checking relevant preconditions first.
4528         /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
4529         /// returns `(None, Vec::new())`.
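             ///
             /// A minimal calling sketch (hypothetical surrounding code; persistence and error
             /// handling elided), not compiled as a doctest:
             /// ```ignore
             /// let (monitor_update_opt, failed_htlcs) =
             ///     channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
             /// for (source, payment_hash) in failed_htlcs {
             ///     // These HTLCs could not be sent (e.g. due to fee constraints) and should
             ///     // be failed backwards.
             /// }
             /// if let Some(monitor_update) = monitor_update_opt {
             ///     // Persist the update before relying on the new channel state.
             /// }
             /// ```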
4530         pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
4531                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4532         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
4533         where F::Target: FeeEstimator, L::Target: Logger
4534         {
4535                 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
4536                         self.free_holding_cell_htlcs(fee_estimator, logger)
4537                 } else { (None, Vec::new()) }
4538         }
4539
4540         /// Frees any pending commitment updates in the holding cell, generating the relevant messages
4541         /// for our counterparty.
4542         fn free_holding_cell_htlcs<F: Deref, L: Deref>(
4543                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4544         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
4545         where F::Target: FeeEstimator, L::Target: Logger
4546         {
4547                 assert!(!self.context.channel_state.is_monitor_update_in_progress());
4548                 if !self.context.holding_cell_htlc_updates.is_empty() || self.context.holding_cell_update_fee.is_some() {
4549                         log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
4550                                 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
4551
4552                         let mut monitor_update = ChannelMonitorUpdate {
4553                                 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
4554                                 counterparty_node_id: Some(self.context.counterparty_node_id),
4555                                 updates: Vec::new(),
4556                                 channel_id: Some(self.context.channel_id()),
4557                         };
4558
4559                         let mut htlc_updates = Vec::new();
4560                         mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
4561                         let mut update_add_count = 0;
4562                         let mut update_fulfill_count = 0;
4563                         let mut update_fail_count = 0;
4564                         let mut htlcs_to_fail = Vec::new();
4565                         for htlc_update in htlc_updates.drain(..) {
4566                                 // Note that this *can* fail, though it should be due to rather rare conditions such as
4567                                 // fee races with adding too many outputs, which push our total payments just over
4568                                 // the limit. In case it's less rare than anticipated, we may want to revisit
4569                                 // handling this case better and maybe fulfilling some of the HTLCs while attempting
4570                                 // to rebalance channels.
4571                                 let fail_htlc_res = match &htlc_update {
4572                                         &HTLCUpdateAwaitingACK::AddHTLC {
4573                                                 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
4574                                                 skimmed_fee_msat, blinding_point, ..
4575                                         } => {
4576                                                 match self.send_htlc(
4577                                                         amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
4578                                                         false, skimmed_fee_msat, blinding_point, fee_estimator, logger
4579                                                 ) {
4580                                                         Ok(_) => update_add_count += 1,
4581                                                         Err(e) => {
4582                                                                 match e {
4583                                                                         ChannelError::Ignore(ref msg) => {
4584                                                                                 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
4585                                                                                 // If we fail to send here, then this HTLC should
4586                                                                                 // be failed backwards. Failing to send here
4587                                                                                 // indicates that this HTLC may keep being put back
4588                                                                                 // into the holding cell without ever being
4589                                                                                 // successfully forwarded/failed/fulfilled, causing
4590                                                                                 // our counterparty to eventually close on us.
4591                                                                                 htlcs_to_fail.push((source.clone(), *payment_hash));
4592                                                                         },
4593                                                                         _ => {
4594                                                                                 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
4595                                                                         },
4596                                                                 }
4597                                                         }
4598                                                 }
4599                                                 None
4600                                         },
4601                                         &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
4602                                                 // If an HTLC claim was previously added to the holding cell (via
4603                                                 // `get_update_fulfill_htlc`), then generating the claim message itself must
4604                                                 // not fail - any in between attempts to claim the HTLC will have resulted
4605                                                 // in it hitting the holding cell again and we cannot change the state of a
4606                                                 // holding cell HTLC from fulfill to anything else.
4607                                                 let mut additional_monitor_update =
4608                                                         if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
4609                                                                 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
4610                                                         { monitor_update } else { unreachable!() };
4611                                                 update_fulfill_count += 1;
4612                                                 monitor_update.updates.append(&mut additional_monitor_update.updates);
4613                                                 None
4614                                         },
4615                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
4616                                                 Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
4617                                                  .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
4618                                         },
4619                                         &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
4620                                                 Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
4621                                                  .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
4622                                         }
4623                                 };
4624                                 if let Some(res) = fail_htlc_res {
4625                                         match res {
4626                                                 Ok(fail_msg_opt) => {
4627                                                         // If an HTLC failure was previously added to the holding cell (via
4628                                                         // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
4629                                                         // not fail - we should never end up in a state where we double-fail
4630                                                         // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
4631                                                         // for a full revocation before failing.
4632                                                         debug_assert!(fail_msg_opt.is_some());
4633                                                         update_fail_count += 1;
4634                                                 },
4635                                                 Err(ChannelError::Ignore(_)) => {},
4636                                                 Err(_) => {
4637                                                         panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
4638                                                 },
4639                                         }
4640                                 }
4641                         }
4642                         if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
4643                                 return (None, htlcs_to_fail);
4644                         }
4645                         let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
4646                                 self.send_update_fee(feerate, false, fee_estimator, logger)
4647                         } else {
4648                                 None
4649                         };
4650
4651                         let mut additional_update = self.build_commitment_no_status_check(logger);
4652                         // build_commitment_no_status_check and get_update_fulfill_htlc may bump
4653                         // latest_monitor_update_id, but we want update_ids to be strictly increasing by one, so reset it here.
4654                         self.context.latest_monitor_update_id = monitor_update.update_id;
4655                         monitor_update.updates.append(&mut additional_update.updates);
4656
4657                         log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
4658                                 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
4659                                 update_add_count, update_fulfill_count, update_fail_count);
4660
4661                         self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
4662                         (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
4663                 } else {
4664                         (None, Vec::new())
4665                 }
4666         }
4667
4668         /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
4669         /// commitment_signed message here in case we had pending outbound HTLCs to add which were
4670         /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
4671         /// generating an appropriate error *after* the channel state has been updated based on the
4672         /// revoke_and_ack message.
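             ///
             /// A hedged calling sketch (the surrounding variable names are assumptions):
             /// ```ignore
             /// let (htlcs_to_fail, monitor_update_opt) =
             ///     channel.revoke_and_ack(&msg, &fee_estimator, &logger, false)?;
             /// // Fail `htlcs_to_fail` backwards; if a monitor update was returned, persist
             /// // it before acting on the updated channel state.
             /// ```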
4673         pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
4674                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
4675         ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
4676         where F::Target: FeeEstimator, L::Target: Logger,
4677         {
4678                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4679                         return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
4680                 }
4681                 if self.context.channel_state.is_peer_disconnected() {
4682                         return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
4683                 }
4684                 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
4685                         return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
4686                 }
4687
4688                 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
4689
4690                 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
4691                         if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
4692                                 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
4693                         }
4694                 }
4695
4696                 if !self.context.channel_state.is_awaiting_remote_revoke() {
4697                         // Our counterparty seems to have burned their coins to us (by revoking a state when we
4698                         // haven't given them a new commitment transaction to broadcast). We should probably
4699                         // take advantage of this by updating our channel monitor, sending them an error, and
4700                 // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
4701                         // lot of work, and there's some chance this is all a misunderstanding anyway.
4702                         // We have to do *something*, though, since our signer may get mad at us for otherwise
4703                         // jumping a remote commitment number, so best to just force-close and move on.
4704                         return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
4705                 }
4706
4707                 #[cfg(any(test, fuzzing))]
4708                 {
4709                         *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
4710                         *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
4711                 }
4712
4713                 match &self.context.holder_signer {
4714                         ChannelSignerType::Ecdsa(ecdsa) => {
4715                                 ecdsa.validate_counterparty_revocation(
4716                                         self.context.cur_counterparty_commitment_transaction_number + 1,
4717                                         &secret
4718                                 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
4719                         },
4720                         // TODO (taproot|arik)
4721                         #[cfg(taproot)]
4722                         _ => todo!()
4723                 };
4724
4725                 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
4726                         .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
4727                 self.context.latest_monitor_update_id += 1;
4728                 let mut monitor_update = ChannelMonitorUpdate {
4729                         update_id: self.context.latest_monitor_update_id,
4730                         counterparty_node_id: Some(self.context.counterparty_node_id),
4731                         updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
4732                                 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
4733                                 secret: msg.per_commitment_secret,
4734                         }],
4735                         channel_id: Some(self.context.channel_id()),
4736                 };
4737
4738                 // Update state now that we've passed all the can-fail calls...
4739                 // (note that we may still fail to generate the new commitment_signed message, but that's
4740                 // OK, we step the channel here and *then* if the new generation fails we can fail the
4741                 // channel based on that, but stepping stuff here should be safe either way.)
4742                 self.context.channel_state.clear_awaiting_remote_revoke();
4743                 self.context.sent_message_awaiting_response = None;
4744                 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4745                 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4746                 self.context.cur_counterparty_commitment_transaction_number -= 1;
4747
4748                 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4749                         self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
4750                 }
4751
4752                 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
4753                 let mut to_forward_infos = Vec::new();
4754                 let mut pending_update_adds = Vec::new();
4755                 let mut revoked_htlcs = Vec::new();
4756                 let mut finalized_claimed_htlcs = Vec::new();
4757                 let mut update_fail_htlcs = Vec::new();
4758                 let mut update_fail_malformed_htlcs = Vec::new();
4759                 let mut require_commitment = false;
4760                 let mut value_to_self_msat_diff: i64 = 0;
4761
4762                 {
4763                         // Take references explicitly so that we can hold multiple references to self.context.
4764                         let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
4765                         let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
4766                         let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
4767
4768                         // We really shouldn't need two passes here, but retain only gives a non-mutable ref
4769                         pending_inbound_htlcs.retain(|htlc| {
4770                                 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4771                                         log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
4772                                         if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
4773                                                 value_to_self_msat_diff += htlc.amount_msat as i64;
4774                                         }
4775                                         *expecting_peer_commitment_signed = true;
4776                                         false
4777                                 } else { true }
4778                         });
4779                         pending_outbound_htlcs.retain(|htlc| {
4780                                 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
4781                                         log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
4782                                         if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
4783                                                 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
4784                                         } else {
4785                                                 finalized_claimed_htlcs.push(htlc.source.clone());
4786                                                 // They fulfilled, so we sent them money
4787                                                 value_to_self_msat_diff -= htlc.amount_msat as i64;
4788                                         }
4789                                         false
4790                                 } else { true }
4791                         });
4792                         for htlc in pending_inbound_htlcs.iter_mut() {
4793                                 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
4794                                         true
4795                                 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
4796                                         true
4797                                 } else { false };
4798                                 if swap {
4799                                         let mut state = InboundHTLCState::Committed;
4800                                         mem::swap(&mut state, &mut htlc.state);
4801
4802                                         if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution) = state {
4803                                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
4804                                                 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution);
4805                                                 require_commitment = true;
4806                                         } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution) = state {
4807                                                 match resolution {
4808                                                         InboundHTLCResolution::Resolved { pending_htlc_status } =>
4809                                                                 match pending_htlc_status {
4810                                                                         PendingHTLCStatus::Fail(fail_msg) => {
4811                                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
4812                                                                                 require_commitment = true;
4813                                                                                 match fail_msg {
4814                                                                                         HTLCFailureMsg::Relay(msg) => {
4815                                                                                                 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
4816                                                                                                 update_fail_htlcs.push(msg)
4817                                                                                         },
4818                                                                                         HTLCFailureMsg::Malformed(msg) => {
4819                                                                                                 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
4820                                                                                                 update_fail_malformed_htlcs.push(msg)
4821                                                                                         },
4822                                                                                 }
4823                                                                         },
4824                                                                         PendingHTLCStatus::Forward(forward_info) => {
4825                                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed, attempting to forward", &htlc.payment_hash);
4826                                                                                 to_forward_infos.push((forward_info, htlc.htlc_id));
4827                                                                                 htlc.state = InboundHTLCState::Committed;
4828                                                                         }
4829                                                                 }
4830                                                         InboundHTLCResolution::Pending { update_add_htlc } => {
4831                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
4832                                                                 pending_update_adds.push(update_add_htlc);
4833                                                                 htlc.state = InboundHTLCState::Committed;
4834                                                         }
4835                                                 }
4836                                         }
4837                                 }
4838                         }
4839                         for htlc in pending_outbound_htlcs.iter_mut() {
4840                                 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
4841                                         log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
4842                                         htlc.state = OutboundHTLCState::Committed;
4843                                         *expecting_peer_commitment_signed = true;
4844                                 }
4845                                 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
4846                                         log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
4847                                         // Grab the preimage, if it exists, instead of cloning
4848                                         let mut reason = OutboundHTLCOutcome::Success(None);
4849                                         mem::swap(outcome, &mut reason);
4850                                         htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
4851                                         require_commitment = true;
4852                                 }
4853                         }
4854                 }
4855                 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
4856
4857                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
4858                         match update_state {
4859                                 FeeUpdateState::Outbound => {
4860                                         debug_assert!(self.context.is_outbound());
4861                                         log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
4862                                         self.context.feerate_per_kw = feerate;
4863                                         self.context.pending_update_fee = None;
4864                                         self.context.expecting_peer_commitment_signed = true;
4865                                 },
4866                                 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
4867                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
4868                                         debug_assert!(!self.context.is_outbound());
4869                                         log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
4870                                         require_commitment = true;
4871                                         self.context.feerate_per_kw = feerate;
4872                                         self.context.pending_update_fee = None;
4873                                 },
4874                         }
4875                 }
4876
4877                 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
4878                 let release_state_str =
4879                         if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
4880                 macro_rules! return_with_htlcs_to_fail {
4881                         ($htlcs_to_fail: expr) => {
4882                                 if !release_monitor {
4883                                         self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4884                                                 update: monitor_update,
4885                                         });
4886                                         return Ok(($htlcs_to_fail, None));
4887                                 } else {
4888                                         return Ok(($htlcs_to_fail, Some(monitor_update)));
4889                                 }
4890                         }
4891                 }
4892
4893                 self.context.monitor_pending_update_adds.append(&mut pending_update_adds);
4894
4895                 if self.context.channel_state.is_monitor_update_in_progress() {
4896                         // We can't actually generate a new commitment transaction (incl by freeing holding
4897                         // We can't actually generate a new commitment transaction (including by freeing holding
4898                         if require_commitment {
4899                                 self.context.monitor_pending_commitment_signed = true;
4900                                 // When the monitor updating is restored we'll call
4901                                 // get_last_commitment_update_for_send(), which does not update state, but we're
4902                                 // definitely now awaiting a remote revoke before we can step forward any more, so
4903                                 // set it here.
4904                                 let mut additional_update = self.build_commitment_no_status_check(logger);
4905                                 // build_commitment_no_status_check may bump latest_monitor_update_id but we want
4906                                 // update_ids to be strictly increasing by one, so reset it here.
4907                                 self.context.latest_monitor_update_id = monitor_update.update_id;
4908                                 monitor_update.updates.append(&mut additional_update.updates);
4909                         }
4910                         self.context.monitor_pending_forwards.append(&mut to_forward_infos);
4911                         self.context.monitor_pending_failures.append(&mut revoked_htlcs);
4912                         self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
4913                         log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
4914                         return_with_htlcs_to_fail!(Vec::new());
4915                 }
4916
4917                 match self.free_holding_cell_htlcs(fee_estimator, logger) {
4918                         (Some(mut additional_update), htlcs_to_fail) => {
4919                                 // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times but we want
4920                                 // update_ids to be strictly increasing by one, so reset it here.
4921                                 self.context.latest_monitor_update_id = monitor_update.update_id;
4922                                 monitor_update.updates.append(&mut additional_update.updates);
4923
4924                                 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
4925                                         &self.context.channel_id(), release_state_str);
4926
4927                                 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4928                                 return_with_htlcs_to_fail!(htlcs_to_fail);
4929                         },
4930                         (None, htlcs_to_fail) => {
4931                                 if require_commitment {
4932                                         let mut additional_update = self.build_commitment_no_status_check(logger);
4933
4934                                         // build_commitment_no_status_check may bump latest_monitor_update_id but we want
4935                                         // update_ids to be strictly increasing by one, so reset it here.
4936                                         self.context.latest_monitor_update_id = monitor_update.update_id;
4937                                         monitor_update.updates.append(&mut additional_update.updates);
4938
4939                                         log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
4940                                                 &self.context.channel_id(),
4941                                                 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
4942                                                 release_state_str);
4943
4944                                         self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4945                                         return_with_htlcs_to_fail!(htlcs_to_fail);
4946                                 } else {
4947                                         log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
4948                                                 &self.context.channel_id(), release_state_str);
4949
4950                                         self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4951                                         return_with_htlcs_to_fail!(htlcs_to_fail);
4952                                 }
4953                         }
4954                 }
4955         }
4956
4957         /// Queues up an outbound update fee by placing it in the holding cell. You should call
4958         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
4959         /// commitment update.
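             ///
             /// For illustration, a sketch assuming a live, outbound `channel`:
             /// ```ignore
             /// channel.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
             /// // Later, actually generate the update_fee and commitment update:
             /// let (monitor_update_opt, failed_htlcs) =
             ///     channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
             /// ```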
4960         pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
4961                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4962         where F::Target: FeeEstimator, L::Target: Logger
4963         {
4964                 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
4965                 assert!(msg_opt.is_none(), "We forced holding cell?");
4966         }
4967
4968         /// Adds a pending update to this channel. See the doc for send_htlc for
4969         /// further details on when the return value may be `None`.
4970         /// If our balance is too low to cover the cost of the next commitment transaction at the
4971         /// new feerate, the update is cancelled.
4972         ///
4973         /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
4974         /// [`Channel`] if `force_holding_cell` is false.
4975         fn send_update_fee<F: Deref, L: Deref>(
4976                 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4977                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4978         ) -> Option<msgs::UpdateFee>
4979         where F::Target: FeeEstimator, L::Target: Logger
4980         {
4981                 if !self.context.is_outbound() {
4982                         panic!("Cannot send fee from inbound channel");
4983                 }
4984                 if !self.context.is_usable() {
4985                         panic!("Cannot update fee until channel is fully established and we haven't started shutting down");
4986                 }
4987                 if !self.context.is_live() {
4988                         panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4989                 }
4990
4991                 // Before proposing a feerate update, check that we can actually afford the new fee.
4992                 let htlc_stats = self.context.get_pending_htlc_stats(Some(feerate_per_kw));
4993                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4994                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
4995                 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + htlc_stats.on_holder_tx_outbound_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
4996                 let holder_balance_msat = commitment_stats.local_balance_msat - htlc_stats.outbound_holding_cell_msat;
4997                 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4998                         //TODO: auto-close after a number of failures?
4999                         log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
5000                         return None;
5001                 }
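                     // Worked illustration of the buffer above (illustrative numbers; non-anchor
                     // commitment weights of 724 WU base plus 172 WU per nondust HTLC, per BOLT 3,
                     // assumed): at `feerate_per_kw` = 1_000 with four counted HTLC slots
                     // (nondust + outbound holding cell + CONCURRENT_INBOUND_HTLC_FEE_BUFFER), the
                     // buffer is 1_000 * (724 + 4 * 172) / 1_000 = 1_412 sat, i.e. 1_412_000 msat
                     // on top of the counterparty-selected reserve.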
5002
5003                 // Note that we evaluate the pending HTLCs' trimmed-to-dust thresholds "preemptively", i.e. at the proposed `feerate_per_kw`.
5004                 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
5005                 if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
5006                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
5007                         return None;
5008                 }
5009                 if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
5010                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
5011                         return None;
5012                 }
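                     // For intuition (illustrative numbers only): if the configured max dust
                     // exposure is 5_000_000 msat and the proposed feerate would leave
                     // 6_000_000 msat of HTLCs trimmed-to-dust on either commitment transaction,
                     // we refuse to send the update_fee, as that value would be burned to fees
                     // on a force-close.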
5013
5014                 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
5015                         force_holding_cell = true;
5016                 }
5017
5018                 if force_holding_cell {
5019                         self.context.holding_cell_update_fee = Some(feerate_per_kw);
5020                         return None;
5021                 }
5022
5023                 debug_assert!(self.context.pending_update_fee.is_none());
5024                 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
5025
5026                 Some(msgs::UpdateFee {
5027                         channel_id: self.context.channel_id,
5028                         feerate_per_kw,
5029                 })
5030         }
5031
5032         /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
5033         /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
5034         /// resent.
5035         /// No further message handling calls may be made until a channel_reestablish dance has
5036         /// completed.
5037         /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
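             ///
             /// A minimal handling sketch (hypothetical caller; the force-shutdown call is
             /// elided as its exact arguments depend on the caller's context):
             /// ```ignore
             /// if channel.remove_uncommitted_htlcs_and_mark_paused(&logger).is_err() {
             ///     // Still in a pre-funded state: force-shutdown the channel instead.
             /// }
             /// ```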
5038         pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
5039                 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
5040                 if self.context.channel_state.is_pre_funded_state() {
5041                         return Err(())
5042                 }
5043
5044                 if self.context.channel_state.is_peer_disconnected() {
5045                         // While the below code should be idempotent, it's simpler to just return early, as
5046                         // redundant disconnect events can fire, though they should be rare.
5047                         return Ok(());
5048                 }
5049
5050                 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
5051                         self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
5052                 }
5053
5054                 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
5055                 // will be retransmitted.
5056                 self.context.last_sent_closing_fee = None;
5057                 self.context.pending_counterparty_closing_signed = None;
5058                 self.context.closing_fee_limits = None;
5059
5060                 let mut inbound_drop_count = 0;
5061                 self.context.pending_inbound_htlcs.retain(|htlc| {
5062                         match htlc.state {
5063                                 InboundHTLCState::RemoteAnnounced(_) => {
5064                                         // They sent us an update_add_htlc but we never got the commitment_signed.
5065                                         // We'll tell them what commitment_signed we're expecting next and they'll drop
5066                                         // this HTLC accordingly
5067                                         inbound_drop_count += 1;
5068                                         false
5069                                 },
5070                                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) | InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
5071                                         // We received a commitment_signed updating this HTLC and (at least hopefully)
5072                                         // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
5073                                         // in response to it yet, so don't touch it.
5074                                         true
5075                                 },
5076                                 InboundHTLCState::Committed => true,
5077                                 InboundHTLCState::LocalRemoved(_) => {
5078                                         // We (hopefully) sent a commitment_signed updating this HTLC (which we can
5079                                         // re-transmit if needed) and they may have even sent a revoke_and_ack back
5080                                         // (that we missed). Keep this around for now and if they tell us they missed
5081                                         // the commitment_signed we can re-transmit the update then.
5082                                         true
5083                                 },
5084                         }
5085                 });
5086                 self.context.next_counterparty_htlc_id -= inbound_drop_count;
5087
5088                 if let Some((_, update_state)) = self.context.pending_update_fee {
5089                         if update_state == FeeUpdateState::RemoteAnnounced {
5090                                 debug_assert!(!self.context.is_outbound());
5091                                 self.context.pending_update_fee = None;
5092                         }
5093                 }
5094
5095                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
5096                         if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
5097                                 // They sent us an update to remove this but haven't yet sent the corresponding
5098                                 // commitment_signed, we need to move it back to Committed and they can re-send
5099                                 // the update upon reconnection.
5100                                 htlc.state = OutboundHTLCState::Committed;
5101                         }
5102                 }
5103
5104                 self.context.sent_message_awaiting_response = None;
5105
5106                 self.context.channel_state.set_peer_disconnected();
5107                 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
5108                 Ok(())
5109         }
5110
5111         /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
5112         /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
5113         /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
5114         /// update completes (potentially immediately).
5115         /// The messages which were generated with the monitor update must *not* have been sent to the
5116         /// remote end, and must instead have been dropped. They will be regenerated when
5117         /// [`Self::monitor_updating_restored`] is called.
5118         ///
5119         /// [`ChannelManager`]: super::channelmanager::ChannelManager
5120         /// [`chain::Watch`]: crate::chain::Watch
5121         /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
5122         fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
5123                 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
5124                 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
5125                 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
5126         ) {
5127                 self.context.monitor_pending_revoke_and_ack |= resend_raa;
5128                 self.context.monitor_pending_commitment_signed |= resend_commitment;
5129                 self.context.monitor_pending_channel_ready |= resend_channel_ready;
5130                 self.context.monitor_pending_forwards.append(&mut pending_forwards);
5131                 self.context.monitor_pending_failures.append(&mut pending_fails);
5132                 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
5133                 self.context.channel_state.set_monitor_update_in_progress();
5134         }
5135
5136         /// Indicates that the latest ChannelMonitor update has been committed by the client
5137         /// successfully and we should restore normal operation. Returns messages which should be sent
5138         /// to the remote side.
5139         pub fn monitor_updating_restored<L: Deref, NS: Deref>(
5140                 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
5141                 user_config: &UserConfig, best_block_height: u32
5142         ) -> MonitorRestoreUpdates
5143         where
5144                 L::Target: Logger,
5145                 NS::Target: NodeSigner
5146         {
5147                 assert!(self.context.channel_state.is_monitor_update_in_progress());
5148                 self.context.channel_state.clear_monitor_update_in_progress();
5149
5150                 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
5151                 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
5152                 // first received the funding_signed.
5153                 let mut funding_broadcastable =
5154                         if self.context.is_outbound() &&
5155                                 (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
5156                                 matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
5157                         {
5158                                 self.context.funding_transaction.take()
5159                         } else { None };
5160                 // That said, if the funding transaction is already confirmed (ie we're active with a
5161                 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
5162                 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
5163                         funding_broadcastable = None;
5164                 }
5165
5166                 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
5167                 // (and we assume the user never directly broadcasts the funding transaction and waits for
5168                 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
5169                 // * an inbound channel that failed to persist the monitor on funding_created and we got
5170                 //   the funding transaction confirmed before the monitor was persisted, or
5171                 // * a 0-conf channel that intended to send the channel_ready before any broadcast at all.
5172                 let channel_ready = if self.context.monitor_pending_channel_ready {
5173                         assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
5174                                 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
5175                         self.context.monitor_pending_channel_ready = false;
5176                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5177                         Some(msgs::ChannelReady {
5178                                 channel_id: self.context.channel_id(),
5179                                 next_per_commitment_point,
5180                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5181                         })
5182                 } else { None };
5183
5184                 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
5185
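                     // Drain the monitor-pending queues into the return value by swapping in
                     // fresh, empty Vecs; this moves the contents out without cloning while
                     // leaving valid (empty) Vecs behind in the channel context.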
5186                 let mut accepted_htlcs = Vec::new();
5187                 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
5188                 let mut failed_htlcs = Vec::new();
5189                 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
5190                 let mut finalized_claimed_htlcs = Vec::new();
5191                 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
5192                 let mut pending_update_adds = Vec::new();
5193                 mem::swap(&mut pending_update_adds, &mut self.context.monitor_pending_update_adds);
5194
5195                 if self.context.channel_state.is_peer_disconnected() {
5196                         self.context.monitor_pending_revoke_and_ack = false;
5197                         self.context.monitor_pending_commitment_signed = false;
5198                         return MonitorRestoreUpdates {
5199                                 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
5200                                 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, pending_update_adds,
5201                                 funding_broadcastable, channel_ready, announcement_sigs
5202                         };
5203                 }
5204
5205                 let raa = if self.context.monitor_pending_revoke_and_ack {
5206                         Some(self.get_last_revoke_and_ack())
5207                 } else { None };
5208                 let commitment_update = if self.context.monitor_pending_commitment_signed {
5209                         self.get_last_commitment_update_for_send(logger).ok()
5210                 } else { None };
5211                 if commitment_update.is_some() {
5212                         self.mark_awaiting_response();
5213                 }
5214
5215                 self.context.monitor_pending_revoke_and_ack = false;
5216                 self.context.monitor_pending_commitment_signed = false;
5217                 let order = self.context.resend_order.clone();
5218                 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
5219                         &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
5220                         if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
5221                         match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
5222                 MonitorRestoreUpdates {
5223                         raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs,
5224                         pending_update_adds, funding_broadcastable, channel_ready, announcement_sigs
5225                 }
5226         }
5227
5228         pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
5229                 where F::Target: FeeEstimator, L::Target: Logger
5230         {
5231                 if self.context.is_outbound() {
5232                         return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
5233                 }
5234                 if self.context.channel_state.is_peer_disconnected() {
5235                         return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
5236                 }
5237                 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
5238
5239                 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
5240                 self.context.update_time_counter += 1;
5241                 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
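                     // An HTLC counts as dust if its value doesn't exceed the dust limit plus the
                     // fee for its HTLC transaction at the current feerate, so a feerate increase
                     // can reclassify previously non-dust HTLCs as dust. Dust HTLCs get no
                     // on-chain output and are simply burned to fees on force-close; as an
                     // illustrative example, ten 4_000 sat dust HTLCs represent 40_000_000 msat
                     // of exposure to weigh against the limit checked below.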
5242                 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
5243                         let htlc_stats = self.context.get_pending_htlc_stats(None);
5244                         let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
5245                         if htlc_stats.on_holder_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
5246                                 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
5247                                         msg.feerate_per_kw, htlc_stats.on_holder_tx_dust_exposure_msat)));
5248                         }
5249                         if htlc_stats.on_counterparty_tx_dust_exposure_msat > max_dust_htlc_exposure_msat {
5250                                 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
5251                                         msg.feerate_per_kw, htlc_stats.on_counterparty_tx_dust_exposure_msat)));
5252                         }
5253                 }
5254                 Ok(())
5255         }
5256
5257         /// Indicates that the signer may have some signatures for us, so we should retry if we're
5258         /// blocked.
5259         #[cfg(async_signing)]
5260         pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
5261                 let commitment_update = if self.context.signer_pending_commitment_update {
5262                         self.get_last_commitment_update_for_send(logger).ok()
5263                 } else { None };
5264                 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
5265                         self.context.get_funding_signed_msg(logger).1
5266                 } else { None };
5267                 let channel_ready = if funding_signed.is_some() {
5268                         self.check_get_channel_ready(0)
5269                 } else { None };
5270
5271                 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
5272                         if commitment_update.is_some() { "a" } else { "no" },
5273                         if funding_signed.is_some() { "a" } else { "no" },
5274                         if channel_ready.is_some() { "a" } else { "no" });
5275
5276                 SignerResumeUpdates {
5277                         commitment_update,
5278                         funding_signed,
5279                         channel_ready,
5280                 }
5281         }
5282
5283         fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
5284                 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5285                 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
5286                 msgs::RevokeAndACK {
5287                         channel_id: self.context.channel_id,
5288                         per_commitment_secret,
5289                         next_per_commitment_point,
5290                         #[cfg(taproot)]
5291                         next_local_nonce: None,
5292                 }
5293         }
5294
5295         /// Gets the last commitment update for immediate sending to our peer.
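             /// The update is regenerated from the current HTLC state (outbound `LocalAnnounced`
             /// adds and inbound `LocalRemoved` fails/fulfills) rather than replayed from stored
             /// messages, so it can safely be rebuilt after a reconnection.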
5296         fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
5297                 let mut update_add_htlcs = Vec::new();
5298                 let mut update_fulfill_htlcs = Vec::new();
5299                 let mut update_fail_htlcs = Vec::new();
5300                 let mut update_fail_malformed_htlcs = Vec::new();
5301
5302                 for htlc in self.context.pending_outbound_htlcs.iter() {
5303                         if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
5304                                 update_add_htlcs.push(msgs::UpdateAddHTLC {
5305                                         channel_id: self.context.channel_id(),
5306                                         htlc_id: htlc.htlc_id,
5307                                         amount_msat: htlc.amount_msat,
5308                                         payment_hash: htlc.payment_hash,
5309                                         cltv_expiry: htlc.cltv_expiry,
5310                                         onion_routing_packet: (**onion_packet).clone(),
5311                                         skimmed_fee_msat: htlc.skimmed_fee_msat,
5312                                         blinding_point: htlc.blinding_point,
5313                                 });
5314                         }
5315                 }
5316
5317                 for htlc in self.context.pending_inbound_htlcs.iter() {
5318                         if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
5319                                 match reason {
5320                                         &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
5321                                                 update_fail_htlcs.push(msgs::UpdateFailHTLC {
5322                                                         channel_id: self.context.channel_id(),
5323                                                         htlc_id: htlc.htlc_id,
5324                                                         reason: err_packet.clone()
5325                                                 });
5326                                         },
5327                                         &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
5328                                                 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
5329                                                         channel_id: self.context.channel_id(),
5330                                                         htlc_id: htlc.htlc_id,
5331                                                         sha256_of_onion: sha256_of_onion.clone(),
5332                                                         failure_code: failure_code.clone(),
5333                                                 });
5334                                         },
5335                                         &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
5336                                                 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
5337                                                         channel_id: self.context.channel_id(),
5338                                                         htlc_id: htlc.htlc_id,
5339                                                         payment_preimage: payment_preimage.clone(),
5340                                                 });
5341                                         },
5342                                 }
5343                         }
5344                 }
5345
5346                 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
5347                         Some(msgs::UpdateFee {
5348                                 channel_id: self.context.channel_id(),
5349                                 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
5350                         })
5351                 } else { None };
5352
5353                 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
5354                                 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
5355                                 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
5356                 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
5357                         if self.context.signer_pending_commitment_update {
5358                                 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
5359                                 self.context.signer_pending_commitment_update = false;
5360                         }
5361                         update
5362                 } else {
5363                         #[cfg(not(async_signing))] {
5364                                 panic!("Failed to get signature for new commitment state");
5365                         }
5366                         #[cfg(async_signing)] {
5367                                 if !self.context.signer_pending_commitment_update {
5368                                         log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
5369                                         self.context.signer_pending_commitment_update = true;
5370                                 }
5371                                 return Err(());
5372                         }
5373                 };
5374                 Ok(msgs::CommitmentUpdate {
5375                         update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
5376                         commitment_signed,
5377                 })
5378         }
5379
5380         /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
5381         pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
5382                 if self.context.channel_state.is_local_shutdown_sent() {
5383                         assert!(self.context.shutdown_scriptpubkey.is_some());
5384                         Some(msgs::Shutdown {
5385                                 channel_id: self.context.channel_id,
5386                                 scriptpubkey: self.get_closing_scriptpubkey(),
5387                         })
5388                 } else { None }
5389         }
5390
5391         /// May panic if some calls other than message-handling calls (which will all Err immediately)
5392         /// have been called between `remove_uncommitted_htlcs_and_mark_paused` and this call.
5393         ///
5394         /// Some links printed in log lines are included here to check them during build (when run with
5395         /// `cargo doc --document-private-items`):
5396         /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
5397         /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
5398         pub fn channel_reestablish<L: Deref, NS: Deref>(
5399                 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
5400                 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
5401         ) -> Result<ReestablishResponses, ChannelError>
5402         where
5403                 L::Target: Logger,
5404                 NS::Target: NodeSigner
5405         {
5406                 if !self.context.channel_state.is_peer_disconnected() {
5407                         // While BOLT 2 doesn't indicate explicitly we should error this channel here, it
5408                         // almost certainly indicates we are going to end up out-of-sync in some way, so we
5409                         // just close here instead of trying to recover.
5410                         return Err(ChannelError::Close("Peer sent a loose channel_reestablish not after reconnect".to_owned()));
5411                 }
5412
5413                 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
5414                         msg.next_local_commitment_number == 0 {
5415                         return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
5416                 }
5417
5418                 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
5419                 if msg.next_remote_commitment_number > 0 {
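                             // The peer proves the commitment height they claim by echoing back the
                             // per-commitment secret we revealed when revoking our previous state; we
                             // verify it by deriving the matching per-commitment point ourselves and
                             // comparing.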
5420                         let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
5421                         let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
5422                                 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
5423                         if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
5424                                 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
5425                         }
5426                         if msg.next_remote_commitment_number > our_commitment_transaction {
5427                                 macro_rules! log_and_panic {
5428                                         ($err_msg: expr) => {
5429                                                 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5430                                                 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5431                                         }
5432                                 }
5433                                 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
5434                                         This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
5435                                         More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
5436                                         If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
5437                                         ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
5438                                         ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
5439                                         Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
5440                                         See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
5441                         }
5442                 }
5443
5444                 // Before we change the state of the channel, we check if the peer is sending a very old
5445                 // commitment transaction number; if so, we send a warning message.
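                     // As a worked example (up-counting convention): if our current commitment
                     // number is 10 and the peer says the next revoke_and_ack they expect covers
                     // commitment 7, they have missed revocations we already sent and cannot be
                     // helped by retransmitting just the last one, so we warn instead of closing.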
5446                 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
5447                         return Err(ChannelError::Warn(format!(
5448                                 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
5449                                 msg.next_remote_commitment_number,
5450                                 our_commitment_transaction
5451                         )));
5452                 }
5453
5454                 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
5455                 // remaining cases either succeed or ErrorMessage-fail).
5456                 self.context.channel_state.clear_peer_disconnected();
5457                 self.context.sent_message_awaiting_response = None;
5458
5459                 let shutdown_msg = self.get_outbound_shutdown();
5460
5461                 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height, logger);
5462
5463                 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
5464                         // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
5465                         if !self.context.channel_state.is_our_channel_ready() ||
5466                                         self.context.channel_state.is_monitor_update_in_progress() {
5467                                 if msg.next_remote_commitment_number != 0 {
5468                                         return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
5469                                 }
5470                                 // Short circuit the whole handler as there is nothing we can resend them
5471                                 return Ok(ReestablishResponses {
5472                                         channel_ready: None,
5473                                         raa: None, commitment_update: None,
5474                                         order: RAACommitmentOrder::CommitmentFirst,
5475                                         shutdown_msg, announcement_sigs,
5476                                 });
5477                         }
5478
5479                         // We have OurChannelReady set!
5480                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5481                         return Ok(ReestablishResponses {
5482                                 channel_ready: Some(msgs::ChannelReady {
5483                                         channel_id: self.context.channel_id(),
5484                                         next_per_commitment_point,
5485                                         short_channel_id_alias: Some(self.context.outbound_scid_alias),
5486                                 }),
5487                                 raa: None, commitment_update: None,
5488                                 order: RAACommitmentOrder::CommitmentFirst,
5489                                 shutdown_msg, announcement_sigs,
5490                         });
5491                 }
5492
5493                 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
5494                         // Remote isn't waiting on any RevokeAndACK from us!
5495                         // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
5496                         None
5497                 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
5498                         if self.context.channel_state.is_monitor_update_in_progress() {
5499                                 self.context.monitor_pending_revoke_and_ack = true;
5500                                 None
5501                         } else {
5502                                 Some(self.get_last_revoke_and_ack())
5503                         }
5504                 } else {
5505                         debug_assert!(false, "All values should have been handled in the four cases above");
5506                         return Err(ChannelError::Close(format!(
5507                                 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
5508                                 msg.next_remote_commitment_number,
5509                                 our_commitment_transaction
5510                         )));
5511                 };
5512
5513                 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
5514                 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
5515                 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
5516                 // the corresponding revoke_and_ack back yet.
5517                 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
5518                 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
5519                         self.mark_awaiting_response();
5520                 }
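                     // This converts our down-counting commitment number into the up-counting
                     // "next commitment number" convention used on the wire: immediately after
                     // funding, cur_counterparty_commitment_transaction_number is
                     // INITIAL_COMMITMENT_NUMBER - 1, so the next commitment number the
                     // counterparty expects from us is 1.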
5521                 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
5522
5523                 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
5524                         // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
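                             // (In the up-counting convention both sides are still at commitment
                             // number 1: the peer has seen no commitment update from us and we have
                             // processed none from them, so the only message they can be missing at
                             // this stage is our channel_ready.)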
5525                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5526                         Some(msgs::ChannelReady {
5527                                 channel_id: self.context.channel_id(),
5528                                 next_per_commitment_point,
5529                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5530                         })
5531                 } else { None };
5532
5533                 if msg.next_local_commitment_number == next_counterparty_commitment_number {
5534                         if required_revoke.is_some() {
5535                                 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
5536                         } else {
5537                                 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
5538                         }
5539
5540                         Ok(ReestablishResponses {
5541                                 channel_ready, shutdown_msg, announcement_sigs,
5542                                 raa: required_revoke,
5543                                 commitment_update: None,
5544                                 order: self.context.resend_order.clone(),
5545                         })
5546                 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
5547                         if required_revoke.is_some() {
5548                                 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
5549                         } else {
5550                                 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
5551                         }
5552
5553                         if self.context.channel_state.is_monitor_update_in_progress() {
5554                                 self.context.monitor_pending_commitment_signed = true;
5555                                 Ok(ReestablishResponses {
5556                                         channel_ready, shutdown_msg, announcement_sigs,
5557                                         commitment_update: None, raa: None,
5558                                         order: self.context.resend_order.clone(),
5559                                 })
5560                         } else {
5561                                 Ok(ReestablishResponses {
5562                                         channel_ready, shutdown_msg, announcement_sigs,
5563                                         raa: required_revoke,
5564                                         commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
5565                                         order: self.context.resend_order.clone(),
5566                                 })
5567                         }
5568                 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
5569                         Err(ChannelError::Close(format!(
5570                                 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
5571                                 msg.next_local_commitment_number,
5572                                 next_counterparty_commitment_number,
5573                         )))
5574                 } else {
5575                         Err(ChannelError::Close(format!(
5576                                 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
5577                                 msg.next_local_commitment_number,
5578                                 next_counterparty_commitment_number,
5579                         )))
5580                 }
5581         }
5582
5583         /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
5584         /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
5585         /// at which point they will be recalculated.
5586         fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
5587                 -> (u64, u64)
5588                 where F::Target: FeeEstimator
5589         {
5590                 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
5591
5592                 // Propose a range from our current ChannelCloseMinimum feerate to our
5593                 // NonAnchorChannelFee feerate plus our force_close_avoidance_max_fee_satoshis.
5594                 // If we fail to come to consensus, we'll have to force-close.
5595                 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
5596                 // Use NonAnchorChannelFee because this should be an estimate for a channel close
5597                 // that we don't expect to need fee-bumping.
5598                 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
5599                 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
5600
5601                 // The spec requires that (when the channel does not have anchors) we only send absolute
5602                 // channel fees no greater than the absolute channel fee on the current commitment
5603                 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't
5604                 // a very good reason to apply such a limit in any case. We don't bother doing so, risking
5605                 // some force-closure by old nodes, but we wanted to close the channel anyway.
5606
5607                 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
5608                         let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
5609                         proposed_feerate = cmp::max(proposed_feerate, min_feerate);
5610                         proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
5611                 }
5612
5613                 // Note that technically we could end up with a lower minimum fee if one side's balance is
5614                 // below our dust limit, causing the output to disappear. We don't bother handling this
5615                 // case, however, as this should only happen if a channel is closed before any (material)
5616                 // payments have been made on it. This may cause slight fee overpayment and/or failure to
5617                 // come to consensus with our counterparty on appropriate fees, however it should be a
5618                 // relatively rare case. We can revisit this later, though note that in order to determine
5619                 // if the funder's output is dust we have to know the absolute fee we're going to use.
5620                 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
5621                 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
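                     // As an illustrative example: a proposed_feerate of 1_000 sat/kW and a closing
                     // tx_weight of 700 weight units yields 1_000 * 700 / 1000 = 700 sats above.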
5622                 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
5623                                 // We always add force_close_avoidance_max_fee_satoshis to our normal
5624                                 // feerate-calculated fee, but allow the max to be overridden if we're using a
5625                                 // target feerate-calculated fee.
5626                                 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
5627                                         proposed_max_feerate as u64 * tx_weight / 1000)
5628                         } else {
5629                                 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
5630                         };
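                     // As the fundee we don't pay the closing fee (it comes out of the funder's
                     // output), so the expression above accepts any fee up to the funder's entire
                     // balance: the channel value minus our balance, with our msat balance rounded
                     // up to a whole sat so we never under-count what's ours.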
5631
5632                 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
5633                 self.context.closing_fee_limits.clone().unwrap()
5634         }
5635
5636         /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
5637         /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
5638         /// this point if we're the funder we should send the initial closing_signed, and in any case
5639         /// shutdown should complete within a reasonable timeframe.
5640         fn closing_negotiation_ready(&self) -> bool {
5641                 self.context.closing_negotiation_ready()
5642         }
5643
5644         /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
5645         /// an Err if no progress is being made and the channel should be force-closed instead.
5646         /// Should be called on a one-minute timer.
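             /// Because we only error once `closing_signed_in_flight` was already set on a prior
             /// tick, negotiation effectively gets one to two timer ticks (roughly one to two
             /// minutes) to complete before we give up and force-close.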
5647         pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
5648                 if self.closing_negotiation_ready() {
5649                         if self.context.closing_signed_in_flight {
5650                                 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
5651                         } else {
5652                                 self.context.closing_signed_in_flight = true;
5653                         }
5654                 }
5655                 Ok(())
5656         }
5657
5658         pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
5659                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
5660                 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5661                 where F::Target: FeeEstimator, L::Target: Logger
5662         {
5663                 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
5664                 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
5665                 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
5666                 // that closing_negotiation_ready checks this case (as well as a few others).
5667                 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
5668                         return Ok((None, None, None));
5669                 }
5670
5671                 if !self.context.is_outbound() {
5672                         if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
5673                                 return self.closing_signed(fee_estimator, &msg);
5674                         }
5675                         return Ok((None, None, None));
5676                 }
5677
5678                 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
5679                 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
5680                 if self.context.expecting_peer_commitment_signed {
5681                         return Ok((None, None, None));
5682                 }
5683
5684                 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5685
5686                 assert!(self.context.shutdown_scriptpubkey.is_some());
5687                 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
5688                 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
5689                         our_min_fee, our_max_fee, total_fee_satoshis);
5690
5691                 match &self.context.holder_signer {
5692                         ChannelSignerType::Ecdsa(ecdsa) => {
5693                                 let sig = ecdsa
5694                                         .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5695                                         .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
5696
5697                                 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
5698                                 Ok((Some(msgs::ClosingSigned {
5699                                         channel_id: self.context.channel_id,
5700                                         fee_satoshis: total_fee_satoshis,
5701                                         signature: sig,
5702                                         fee_range: Some(msgs::ClosingSignedFeeRange {
5703                                                 min_fee_satoshis: our_min_fee,
5704                                                 max_fee_satoshis: our_max_fee,
5705                                         }),
5706                                 }), None, None))
5707                         },
5708                         // TODO (taproot|arik)
5709                         #[cfg(taproot)]
5710                         _ => todo!()
5711                 }
5712         }
5713
5714         // Marks a channel as waiting for a response from the counterparty. If a response is not
5715         // received within [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] timer ticks of sending our own
5716         // message, we'll attempt a reconnection.
5717         fn mark_awaiting_response(&mut self) {
5718                 self.context.sent_message_awaiting_response = Some(0);
5719         }
5720
5721         /// Determines whether we should disconnect the counterparty due to not receiving a response
5722         /// within our expected timeframe.
5723         ///
5724         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
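             /// For example, [`Self::mark_awaiting_response`] seeds the counter at zero; each call
             /// here increments it, and we signal a disconnect once it reaches
             /// [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`].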
5725         pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
5726                 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
5727                         ticks_elapsed
5728                 } else {
5729                         // Don't disconnect when we're not waiting on a response.
5730                         return false;
5731                 };
5732                 *ticks_elapsed += 1;
5733                 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
5734         }
5735
5736         pub fn shutdown(
5737                 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
5738         ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
5739         {
5740                 if self.context.channel_state.is_peer_disconnected() {
5741                         return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
5742                 }
5743                 if self.context.channel_state.is_pre_funded_state() {
5744                         // Spec says we should fail the connection, not the channel, but that's nonsense; there
5745                         // are plenty of reasons you may want to fail a channel pre-funding, and spec says you
5746                         // can do that via error message without getting a connection fail anyway...
5747                         return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
5748                 }
5749                 for htlc in self.context.pending_inbound_htlcs.iter() {
5750                         if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
5751                                 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
5752                         }
5753                 }
5754                 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
5755
5756                 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
5757                         return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
5758                 }
5759
5760                 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
5761                         if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
5762                                 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
5763                         }
5764                 } else {
5765                         self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
5766                 }
5767
5768                 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
5769                 // immediately after the commitment dance, but we can send a Shutdown because we won't send
5770                 // any further commitment updates after we set LocalShutdownSent.
5771                 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
5772
5773                 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5774                         Some(_) => false,
5775                         None => {
5776                                 assert!(send_shutdown);
5777                                 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
5778                                         Ok(scriptpubkey) => scriptpubkey,
5779                                         Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
5780                                 };
5781                                 if !shutdown_scriptpubkey.is_compatible(their_features) {
5782                                         return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
5783                                 }
5784                                 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5785                                 true
5786                         },
5787                 };
5788
5789                 // From here on out, we may not fail!
5790
5791                 self.context.channel_state.set_remote_shutdown_sent();
5792                 self.context.update_time_counter += 1;
5793
5794                 let monitor_update = if update_shutdown_script {
5795                         self.context.latest_monitor_update_id += 1;
5796                         let monitor_update = ChannelMonitorUpdate {
5797                                 update_id: self.context.latest_monitor_update_id,
5798                                 counterparty_node_id: Some(self.context.counterparty_node_id),
5799                                 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5800                                         scriptpubkey: self.get_closing_scriptpubkey(),
5801                                 }],
5802                                 channel_id: Some(self.context.channel_id()),
5803                         };
5804                         self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5805                         self.push_ret_blockable_mon_update(monitor_update)
5806                 } else { None };
5807                 let shutdown = if send_shutdown {
5808                         Some(msgs::Shutdown {
5809                                 channel_id: self.context.channel_id,
5810                                 scriptpubkey: self.get_closing_scriptpubkey(),
5811                         })
5812                 } else { None };
5813
5814                 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
5815                 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
5816                 // cell HTLCs and return them to fail the payment.
5817                 self.context.holding_cell_update_fee = None;
5818                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5819                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5820                         match htlc_update {
5821                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5822                                         dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5823                                         false
5824                                 },
5825                                 _ => true
5826                         }
5827                 });
5828
5829                 self.context.channel_state.set_local_shutdown_sent();
5830                 self.context.update_time_counter += 1;
5831
5832                 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
5833         }
5834
5835         fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
5836                 let mut tx = closing_tx.trust().built_transaction().clone();
5837
5838                 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
5839
5840                 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
5841                 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
5842                 let mut holder_sig = sig.serialize_der().to_vec();
5843                 holder_sig.push(EcdsaSighashType::All as u8);
5844                 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
5845                 cp_sig.push(EcdsaSighashType::All as u8);
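                     // The funding output is a 2-of-2 OP_CHECKMULTISIG, so the witness signatures
                     // must appear in the same order as the pubkeys in the funding redeemscript,
                     // which BOLT 3 sorts lexicographically by serialized compressed form. For
                     // example, a key serializing as 02aa... sorts before one serializing as
                     // 03bb..., so its signature is pushed first.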
5846                 if funding_key[..] < counterparty_funding_key[..] {
5847                         tx.input[0].witness.push(holder_sig);
5848                         tx.input[0].witness.push(cp_sig);
5849                 } else {
5850                         tx.input[0].witness.push(cp_sig);
5851                         tx.input[0].witness.push(holder_sig);
5852                 }
5853
5854                 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
5855                 tx
5856         }
5857
5858         pub fn closing_signed<F: Deref>(
5859                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
5860                 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5861                 where F::Target: FeeEstimator
5862         {
5863                 if !self.context.channel_state.is_both_sides_shutdown() {
5864                         return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
5865                 }
5866                 if self.context.channel_state.is_peer_disconnected() {
5867                         return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
5868                 }
5869                 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
5870                         return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
5871                 }
5872                 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
5873                         return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
5874                 }
5875
5876                 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
5877                         return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
5878                 }
5879
5880                 if self.context.channel_state.is_monitor_update_in_progress() {
5881                         self.context.pending_counterparty_closing_signed = Some(msg.clone());
5882                         return Ok((None, None, None));
5883                 }
5884
5885                 let funding_redeemscript = self.context.get_funding_redeemscript();
5886                 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
5887                 if used_total_fee != msg.fee_satoshis {
5888                         return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
5889                 }
5890                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5891
5892                 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
5893                         Ok(_) => {},
5894                         Err(_e) => {
5895                                 // The remote end may have decided to forgo their output due to inconsistent dust
5896                                 // limits, so re-verify the signature against a closing tx omitting their output.
5897                                 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
5898                                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5899                                 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
5900                         },
5901                 };
5902
5903                 for outp in closing_tx.trust().built_transaction().output.iter() {
5904                         if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
5905                                 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
5906                         }
5907                 }
5908
5909                 let closure_reason = if self.initiated_shutdown() {
5910                         ClosureReason::LocallyInitiatedCooperativeClosure
5911                 } else {
5912                         ClosureReason::CounterpartyInitiatedCooperativeClosure
5913                 };
5914
5915                 assert!(self.context.shutdown_scriptpubkey.is_some());
5916                 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
5917                         if last_fee == msg.fee_satoshis {
5918                                 let shutdown_result = ShutdownResult {
5919                                         closure_reason,
5920                                         monitor_update: None,
5921                                         dropped_outbound_htlcs: Vec::new(),
5922                                         unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5923                                         channel_id: self.context.channel_id,
5924                                         user_channel_id: self.context.user_id,
5925                                         channel_capacity_satoshis: self.context.channel_value_satoshis,
5926                                         counterparty_node_id: self.context.counterparty_node_id,
5927                                         unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5928                                         channel_funding_txo: self.context.get_funding_txo(),
5929                                 };
5930                                 let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig);
5931                                 self.context.channel_state = ChannelState::ShutdownComplete;
5932                                 self.context.update_time_counter += 1;
5933                                 return Ok((None, Some(tx), Some(shutdown_result)));
5934                         }
5935                 }
5936
5937                 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5938
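                     // propose_fee! signs a closing transaction at the given fee and returns early: if
                     // the fee matches the counterparty's proposal we complete the shutdown and return
                     // the fully-signed transaction, otherwise we send our counter-proposal (with our
                     // fee range) and wait for their reply.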
5939                 macro_rules! propose_fee {
5940                         ($new_fee: expr) => {
5941                                 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
5942                                         (closing_tx, $new_fee)
5943                                 } else {
5944                                         self.build_closing_transaction($new_fee, false)
5945                                 };
5946
5947                                 return match &self.context.holder_signer {
5948                                         ChannelSignerType::Ecdsa(ecdsa) => {
5949                                                 let sig = ecdsa
5950                                                         .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5951                                                         .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
5952                                                 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
5953                                                         let shutdown_result = ShutdownResult {
5954                                                                 closure_reason,
5955                                                                 monitor_update: None,
5956                                                                 dropped_outbound_htlcs: Vec::new(),
5957                                                                 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5958                                                                 channel_id: self.context.channel_id,
5959                                                                 user_channel_id: self.context.user_id,
5960                                                                 channel_capacity_satoshis: self.context.channel_value_satoshis,
5961                                                                 counterparty_node_id: self.context.counterparty_node_id,
5962                                                                 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5963                                                                 channel_funding_txo: self.context.get_funding_txo(),
5964                                                         };
5965                                                         self.context.channel_state = ChannelState::ShutdownComplete;
5966                                                         self.context.update_time_counter += 1;
5967                                                         let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
5968                                                         (Some(tx), Some(shutdown_result))
5969                                                 } else {
5970                                                         (None, None)
5971                                                 };
5972
5973                                                 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
5974                                                 Ok((Some(msgs::ClosingSigned {
5975                                                         channel_id: self.context.channel_id,
5976                                                         fee_satoshis: used_fee,
5977                                                         signature: sig,
5978                                                         fee_range: Some(msgs::ClosingSignedFeeRange {
5979                                                                 min_fee_satoshis: our_min_fee,
5980                                                                 max_fee_satoshis: our_max_fee,
5981                                                         }),
5982                                                 }), signed_tx, shutdown_result))
5983                                         },
5984                                         // TODO (taproot|arik)
5985                                         #[cfg(taproot)]
5986                                         _ => todo!()
5987                                 }
5988                         }
5989                 }
5990
5991                 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
5992                         if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
5993                                 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
5994                         }
5995                         if max_fee_satoshis < our_min_fee {
5996                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
5997                         }
5998                         if min_fee_satoshis > our_max_fee {
5999                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
6000                         }
6001
6002                         if !self.context.is_outbound() {
6003                                 // They have to pay, so pick the highest fee in the overlapping range.
6004                                 // We should never set an upper bound aside from their full balance
6005                                 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
6006                                 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
6007                         } else {
6008                                 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
6009                                         return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
6010                                                 msg.fee_satoshis, our_min_fee, our_max_fee)));
6011                                 }
6012                                 // The proposed fee is in our acceptable range, accept it and broadcast!
6013                                 propose_fee!(msg.fee_satoshis);
6014                         }
6015                 } else {
6016                         // Old fee style negotiation. We don't bother to enforce whether they are complying
6017                         // with the "making progress" requirements; we just comply and hope for the best.
6018                         if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
6019                                 if msg.fee_satoshis > last_fee {
6020                                         if msg.fee_satoshis < our_max_fee {
6021                                                 propose_fee!(msg.fee_satoshis);
6022                                         } else if last_fee < our_max_fee {
6023                                                 propose_fee!(our_max_fee);
6024                                         } else {
6025                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
6026                                         }
6027                                 } else {
6028                                         if msg.fee_satoshis > our_min_fee {
6029                                                 propose_fee!(msg.fee_satoshis);
6030                                         } else if last_fee > our_min_fee {
6031                                                 propose_fee!(our_min_fee);
6032                                         } else {
6033                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
6034                                         }
6035                                 }
6036                         } else {
6037                                 if msg.fee_satoshis < our_min_fee {
6038                                         propose_fee!(our_min_fee);
6039                                 } else if msg.fee_satoshis > our_max_fee {
6040                                         propose_fee!(our_max_fee);
6041                                 } else {
6042                                         propose_fee!(msg.fee_satoshis);
6043                                 }
6044                         }
6045                 }
6046         }
6047
6048         fn internal_htlc_satisfies_config(
6049                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
6050         ) -> Result<(), (&'static str, u16)> {
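                     // Per BOLT #7, the required forwarding fee is fee_base_msat plus
                     // fee_proportional_millionths of the forwarded amount (i.e. parts per million
                     // msat); the checked arithmetic below guards against overflow on bogus configs.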
6051                 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
6052                         .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
6053                 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
6054                         (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
6055                         return Err((
6056                                 "Prior hop has deviated from specified fees parameters or origin node has obsolete ones",
6057                                 0x1000 | 12, // fee_insufficient
6058                         ));
6059                 }
6060                 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
6061                         return Err((
6062                                 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
6063                                 0x1000 | 13, // incorrect_cltv_expiry
6064                         ));
6065                 }
6066                 Ok(())
6067         }
6068
6069         /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
6070         /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
6071         /// unsuccessful, falls back to the previous one if one exists.
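             ///
             /// For example, assuming a config with a 100 msat base fee, no proportional fee, and a
             /// cltv_expiry_delta of 40, an incoming HTLC of 1_000_100 msat forwarding 1_000_000 msat
             /// passes the fee check, provided its cltv_expiry is at least 40 blocks beyond the
             /// outgoing cltv_expiry.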
6072         pub fn htlc_satisfies_config(
6073                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
6074         ) -> Result<(), (&'static str, u16)> {
6075                 self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
6076                         .or_else(|err| {
6077                                 if let Some(prev_config) = self.context.prev_config() {
6078                                         self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
6079                                 } else {
6080                                         Err(err)
6081                                 }
6082                         })
6083         }
6084
6085         pub fn can_accept_incoming_htlc<F: Deref, L: Deref>(
6086                 &self, msg: &msgs::UpdateAddHTLC, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: L
6087         ) -> Result<(), (&'static str, u16)>
6088         where
6089                 F::Target: FeeEstimator,
6090                 L::Target: Logger
6091         {
6092                 if self.context.channel_state.is_local_shutdown_sent() {
6093                         return Err(("Shutdown was already sent", 0x4000|8))
6094                 }
6095
6096                 let htlc_stats = self.context.get_pending_htlc_stats(None);
6097                 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
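                     // With zero-fee-HTLC anchors, second-stage HTLC transactions carry no fee, so only
                     // the raw dust limits matter; otherwise the weight-based HTLC-timeout/HTLC-success
                     // fee at a buffered feerate raises the effective dust threshold.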
6098                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
6099                         (0, 0)
6100                 } else {
6101                         let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
6102                         (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
6103                                 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
6104                 };
6105                 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
6106                 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
6107                         let on_counterparty_tx_dust_htlc_exposure_msat = htlc_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
6108                         if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
6109                                 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
6110                                         on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
6111                                 return Err(("Exceeded our dust exposure limit on counterparty commitment tx", 0x1000|7))
6112                         }
6113                 }
6114
6115                 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
6116                 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
6117                         let on_holder_tx_dust_htlc_exposure_msat = htlc_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
6118                         if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
6119                                 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
6120                                         on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
6121                                 return Err(("Exceeded our dust exposure limit on holder commitment tx", 0x1000|7))
6122                         }
6123                 }
6124
6125                 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
6126                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
6127                 } else {
6128                         0
6129                 };
6130
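                     // Outbound HTLCs which the counterparty has already claimed with the preimage will
                     // move value to them once fully removed, so treat them as removed from our balance
                     // already.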
6131                 let mut removed_outbound_total_msat = 0;
6132                 for ref htlc in self.context.pending_outbound_htlcs.iter() {
6133                         if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
6134                                 removed_outbound_total_msat += htlc.amount_msat;
6135                         } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
6136                                 removed_outbound_total_msat += htlc.amount_msat;
6137                         }
6138                 }
6139
6140                 let pending_value_to_self_msat =
6141                         self.context.value_to_self_msat + htlc_stats.pending_inbound_htlcs_value_msat - removed_outbound_total_msat;
6142                 let pending_remote_value_msat =
6143                         self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
6144
6145                 if !self.context.is_outbound() {
6146                         // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
6147                         // the spec because the fee spike buffer requirement doesn't exist on the receiver's
6148                         // side, only on the sender's. Note that with anchor outputs we are no longer as
6149                         // sensitive to fee spikes, so we skip the fee spike buffer multiple below.
6150                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
6151                         let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
6152                         if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
6153                                 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
6154                         }
6155                         if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
6156                                 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
6157                                 return Err(("Fee spike buffer violation", 0x1000|7));
6158                         }
6159                 }
6160
6161                 Ok(())
6162         }
6163
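             // Note: commitment transaction numbers count down from INITIAL_COMMITMENT_NUMBER as the
             // channel advances, so lower numbers correspond to later states.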
6164         pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
6165                 self.context.cur_holder_commitment_transaction_number + 1
6166         }
6167
6168         pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
6169                 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
6170         }
6171
6172         pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
6173                 self.context.cur_counterparty_commitment_transaction_number + 2
6174         }
6175
6176         #[cfg(test)]
6177         pub fn get_signer(&self) -> &ChannelSignerType<SP> {
6178                 &self.context.holder_signer
6179         }
6180
6181         #[cfg(test)]
6182         pub fn get_value_stat(&self) -> ChannelValueStat {
6183                 ChannelValueStat {
6184                         value_to_self_msat: self.context.value_to_self_msat,
6185                         channel_value_msat: self.context.channel_value_satoshis * 1000,
6186                         channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
6187                         pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
6188                         pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
6189                         holding_cell_outbound_amount_msat: {
6190                                 let mut res = 0;
6191                                 for h in self.context.holding_cell_htlc_updates.iter() {
6192                                         match h {
6193                                                 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
6194                                                         res += amount_msat;
6195                                                 }
6196                                                 _ => {}
6197                                         }
6198                                 }
6199                                 res
6200                         },
6201                         counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
6202                         counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
6203                 }
6204         }
6205
6206         /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
6207         /// Allowed in any state (including after shutdown).
6208         pub fn is_awaiting_monitor_update(&self) -> bool {
6209                 self.context.channel_state.is_monitor_update_in_progress()
6210         }
6211
6212         /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
6213         pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
6214                 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
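                     // Everything before the first blocked update has already been released, so the
                     // latest unblocked update id is the one immediately preceding it.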
6215                 self.context.blocked_monitor_updates[0].update.update_id - 1
6216         }
6217
6218         /// Returns the next blocked monitor update, if one exists, and a bool indicating whether a
6219         /// further blocked monitor update exists after the next.
6220         pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
6221                 if self.context.blocked_monitor_updates.is_empty() { return None; }
6222                 Some((self.context.blocked_monitor_updates.remove(0).update,
6223                         !self.context.blocked_monitor_updates.is_empty()))
6224         }
6225
6226         /// Pushes a new monitor update into our monitor update queue, returning it if it should be
6227         /// immediately given to the user for persisting or `None` if it should be held as blocked.
6228         fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
6229         -> Option<ChannelMonitorUpdate> {
6230                 let release_monitor = self.context.blocked_monitor_updates.is_empty();
6231                 if !release_monitor {
6232                         self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
6233                                 update,
6234                         });
6235                         None
6236                 } else {
6237                         Some(update)
6238                 }
6239         }
6240
6241         /// On startup, it's possible we detect some monitor updates have actually completed (and the
6242         /// ChannelManager was simply stale). In that case, we should simply drop them, which we do
6243         /// here after logging them.
6244         pub fn on_startup_drop_completed_blocked_mon_updates_through<L: Logger>(&mut self, logger: &L, loaded_mon_update_id: u64) {
6245                 let channel_id = self.context.channel_id();
6246                 self.context.blocked_monitor_updates.retain(|update| {
6247                         if update.update.update_id <= loaded_mon_update_id {
6248                                 log_info!(
6249                                         logger,
6250                                         "Dropping completed ChannelMonitorUpdate id {} on channel {} due to a stale ChannelManager",
6251                                         update.update.update_id,
6252                                         channel_id,
6253                                 );
6254                                 false
6255                         } else {
6256                                 true
6257                         }
6258                 });
6259         }
6260
6261         pub fn blocked_monitor_updates_pending(&self) -> usize {
6262                 self.context.blocked_monitor_updates.len()
6263         }
6264
6265         /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
6266         /// If the channel is outbound, this implies we have not yet broadcasted the funding
6267         /// transaction. If the channel is inbound, this implies simply that the channel has not
6268         /// advanced state.
6269         pub fn is_awaiting_initial_mon_persist(&self) -> bool {
6270                 if !self.is_awaiting_monitor_update() { return false; }
6271                 if matches!(
6272                         self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
6273                         if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
6274                 ) {
6275                         // If we're not a 0conf channel, we'll be waiting on a monitor update with only
6276                         // AwaitingChannelReady set, though our peer could have sent their channel_ready.
6277                         debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
6278                         return true;
6279                 }
6280                 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
6281                         self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
6282                         // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
6283                         // waiting for the initial monitor persistence. Thus, we check if our commitment
6284                         // transaction numbers have both been iterated only exactly once (for the
6285                         // funding_signed), and we're awaiting monitor update.
6286                         //
6287                         // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
6288                         // only way to get an awaiting-monitor-update state during initial funding is if the
6289                         // initial monitor persistence is still pending).
6290                         //
6291                         // Because deciding we're awaiting initial broadcast spuriously could result in
6292                         // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
6293                         // we hard-assert here, even in production builds.
6294                         if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
6295                         assert!(self.context.monitor_pending_channel_ready);
6296                         assert_eq!(self.context.latest_monitor_update_id, 0);
6297                         return true;
6298                 }
6299                 false
6300         }
6301
6302         /// Returns true if our channel_ready has been sent.
6303         pub fn is_our_channel_ready(&self) -> bool {
6304                 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
6305                         matches!(self.context.channel_state, ChannelState::ChannelReady(_))
6306         }
6307
6308         /// Returns true if our peer has either initiated or agreed to shut down the channel.
6309         pub fn received_shutdown(&self) -> bool {
6310                 self.context.channel_state.is_remote_shutdown_sent()
6311         }
6312
6313         /// Returns true if we either initiated or agreed to shut down the channel.
6314         pub fn sent_shutdown(&self) -> bool {
6315                 self.context.channel_state.is_local_shutdown_sent()
6316         }
6317
6318         /// Returns true if we initiated the shutdown of the channel.
6319         pub fn initiated_shutdown(&self) -> bool {
6320                 self.context.local_initiated_shutdown.is_some()
6321         }
6322
6323         /// Returns true if this channel is fully shut down. True here implies that no further actions
6324         /// may/will be taken on this channel, and thus this object should be freed. Any future changes
6325         /// will be handled appropriately by the chain monitor.
6326         pub fn is_shutdown(&self) -> bool {
6327                 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
6328         }
6329
6330         pub fn channel_update_status(&self) -> ChannelUpdateStatus {
6331                 self.context.channel_update_status
6332         }
6333
6334         pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
6335                 self.context.update_time_counter += 1;
6336                 self.context.channel_update_status = status;
6337         }
6338
6339         fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
6340                 // Called:
6341                 //  * always when a new block/transactions are confirmed with the new height
6342                 //  * when funding is signed with a height of 0
6343                 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
6344                         return None;
6345                 }
6346
6347                 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
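                     // A transaction confirmed at the current height counts as one confirmation, hence
                     // the + 1.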
6348                 if funding_tx_confirmations <= 0 {
6349                         self.context.funding_tx_confirmation_height = 0;
6350                 }
6351
6352                 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
6353                         return None;
6354                 }
6355
6356                 // If we're still pending the signature on a funding transaction, then we're not ready to send a
6357                 // channel_ready yet.
6358                 if self.context.signer_pending_funding {
6359                         return None;
6360                 }
6361
6362                 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
6363                 // channel_ready until the entire batch is ready.
6364                 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
6365                         self.context.channel_state.set_our_channel_ready();
6366                         true
6367                 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
6368                         self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
6369                         self.context.update_time_counter += 1;
6370                         true
6371                 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
6372                         // We got a reorg but not enough to trigger a force close, just ignore.
6373                         false
6374                 } else {
6375                         if self.context.funding_tx_confirmation_height != 0 &&
6376                                 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
6377                         {
6378                                 // We should never see a funding transaction on-chain until we've received
6379                                 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
6380                                 // an inbound channel - before that we have no known funding TXID). The fuzzer,
6381                                 // however, may do this and we shouldn't treat it as a bug.
6382                                 #[cfg(not(fuzzing))]
6383                                 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
6384                                         Do NOT broadcast a funding transaction manually - let LDK do it for you!",
6385                                         self.context.channel_state.to_u32());
6386                         }
6387                         // We got a reorg but not enough to trigger a force close, just ignore.
6388                         false
6389                 };
6390
6391                 if need_commitment_update {
6392                         if !self.context.channel_state.is_monitor_update_in_progress() {
6393                                 if !self.context.channel_state.is_peer_disconnected() {
6394                                         let next_per_commitment_point =
6395                                                 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
6396                                         return Some(msgs::ChannelReady {
6397                                                 channel_id: self.context.channel_id,
6398                                                 next_per_commitment_point,
6399                                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
6400                                         });
6401                                 }
6402                         } else {
6403                                 self.context.monitor_pending_channel_ready = true;
6404                         }
6405                 }
6406                 None
6407         }
6408
6409         /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
6410         /// In the first case, we store the confirmation height and calculate the short channel id.
6411         /// In the second, we simply return an Err indicating we need to be force-closed now.
6412         pub fn transactions_confirmed<NS: Deref, L: Deref>(
6413                 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
6414                 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
6415         ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6416         where
6417                 NS::Target: NodeSigner,
6418                 L::Target: Logger
6419         {
6420                 let mut msgs = (None, None);
6421                 if let Some(funding_txo) = self.context.get_funding_txo() {
6422                         for &(index_in_block, tx) in txdata.iter() {
6423                                 // Check if the transaction is the expected funding transaction, and if it is,
6424                                 // check that it pays the right amount to the right script.
6425                                 if self.context.funding_tx_confirmation_height == 0 {
6426                                         if tx.txid() == funding_txo.txid {
6427                                                 let txo_idx = funding_txo.index as usize;
6428                                                 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
6429                                                                 tx.output[txo_idx].value != self.context.channel_value_satoshis {
6430                                                         if self.context.is_outbound() {
6431                                                                 // If we generated the funding transaction and it doesn't match what it
6432                                                                 // should, the client is really broken and we should just panic and
6433                                                                 // tell them off. That said, because hash collisions happen with high
6434                                                                 // probability in fuzzing mode, if we're fuzzing we just close the
6435                                                                 // channel and move on.
6436                                                                 #[cfg(not(fuzzing))]
6437                                                                 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6438                                                         }
6439                                                         self.context.update_time_counter += 1;
6440                                                         let err_reason = "funding tx had wrong script/value or output index";
6441                                                         return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
6442                                                 } else {
6443                                                         if self.context.is_outbound() {
6444                                                                 if !tx.is_coin_base() {
6445                                                                         for input in tx.input.iter() {
6446                                                                                 if input.witness.is_empty() {
6447                                                                                         // We generated a malleable funding transaction, implying we've
6448                                                                                         // just exposed ourselves to funds loss to our counterparty.
6449                                                                                         #[cfg(not(fuzzing))]
6450                                                                                         panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6451                                                                                 }
6452                                                                         }
6453                                                                 }
6454                                                         }
6455                                                         self.context.funding_tx_confirmation_height = height;
6456                                                         self.context.funding_tx_confirmed_in = Some(*block_hash);
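                                                             // Per BOLT #7, the short channel id packs the funding block height, the
                                                             // transaction's index within that block, and the funding output index.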
6457                                                         self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
6458                                                                 Ok(scid) => Some(scid),
6459                                                                 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
6460                                                         }
6461                                                 }
6462                                                 // If this is a coinbase transaction and not a 0-conf channel,
6463                                                 // we should update our minimum_depth to COINBASE_MATURITY (100) to handle coinbase maturity.
6464                                                 if tx.is_coin_base() &&
6465                                                         self.context.minimum_depth.unwrap_or(0) > 0 &&
6466                                                         self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6467                                                         self.context.minimum_depth = Some(COINBASE_MATURITY);
6468                                                 }
6469                                         }
6470                                         // If we allow 1-conf funding, we may need to check for channel_ready here and
6471                                         // send it immediately instead of waiting for a best_block_updated call (which
6472                                         // may have already happened for this block).
6473                                         if let Some(channel_ready) = self.check_get_channel_ready(height) {
6474                                                 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6475                                                 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
6476                                                 msgs = (Some(channel_ready), announcement_sigs);
6477                                         }
6478                                 }
6479                                 for inp in tx.input.iter() {
6480                                         if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
6481                                                 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
6482                                                 return Err(ClosureReason::CommitmentTxConfirmed);
6483                                         }
6484                                 }
6485                         }
6486                 }
6487                 Ok(msgs)
6488         }
6489
6490         /// When a new block is connected, we check the height of the block against outbound holding
6491         /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
6492         /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
6493         /// handled by the ChannelMonitor.
6494         ///
6495         /// If we return Err, the channel may have been closed, at which point the standard
6496         /// requirements apply - no calls may be made except those explicitly stated to be allowed
6497         /// post-shutdown.
6498         ///
6499         /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
6500         /// back.
6501         pub fn best_block_updated<NS: Deref, L: Deref>(
6502                 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
6503                 node_signer: &NS, user_config: &UserConfig, logger: &L
6504         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6505         where
6506                 NS::Target: NodeSigner,
6507                 L::Target: Logger
6508         {
6509                 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
6510         }
6511
6512         fn do_best_block_updated<NS: Deref, L: Deref>(
6513                 &mut self, height: u32, highest_header_time: u32,
6514                 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
6515         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6516         where
6517                 NS::Target: NodeSigner,
6518                 L::Target: Logger
6519         {
6520                 let mut timed_out_htlcs = Vec::new();
6521                 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
6522                 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
6523                 // ~now.
6524                 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
6525                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6526                         match htlc_update {
6527                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
6528                                         if *cltv_expiry <= unforwarded_htlc_cltv_limit {
6529                                                 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
6530                                                 false
6531                                         } else { true }
6532                                 },
6533                                 _ => true
6534                         }
6535                 });
6536
6537                 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
6538
6539                 if let Some(channel_ready) = self.check_get_channel_ready(height) {
6540                         let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6541                                 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6542                         } else { None };
6543                         log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6544                         return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
6545                 }
6546
6547                 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6548                         self.context.channel_state.is_our_channel_ready() {
6549                         let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
6550                         if self.context.funding_tx_confirmation_height == 0 {
6551                                 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
6552                                 // zero if it has been reorged out, however in either case, our state flags
6553                                 // indicate we've already sent a channel_ready
6554                                 funding_tx_confirmations = 0;
6555                         }
6556
6557                         // If we've sent channel_ready (or have both sent and received channel_ready), and
6558                         // the funding transaction has become unconfirmed,
6559                         // close the channel and hope we can get the latest state on chain (because presumably
6560                         // the funding transaction is at least still in the mempool of most nodes).
6561                         //
6562                         // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
6563                         // 0-conf channel, but not doing so may lead to the
6564                         // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
6565                         // to.
6566                         if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
6567                                 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
6568                                         self.context.minimum_depth.unwrap(), funding_tx_confirmations);
6569                                 return Err(ClosureReason::ProcessingError { err: err_reason });
6570                         }
6571                 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
6572                                 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
6573                         log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
6574                         // If funding_tx_confirmed_in is unset, the channel must not be active
6575                         assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
6576                         assert!(!self.context.channel_state.is_our_channel_ready());
6577                         return Err(ClosureReason::FundingTimedOut);
6578                 }
6579
6580                 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6581                         self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6582                 } else { None };
6583                 Ok((None, timed_out_htlcs, announcement_sigs))
6584         }
6585
6586         /// Indicates the funding transaction is no longer confirmed in the main chain. This may
6587         /// force-close the channel, but may also indicate a harmless reorganization of a block or two
6588         /// before the channel has reached channel_ready and we can just wait for more blocks.
6589         pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
6590                 if self.context.funding_tx_confirmation_height != 0 {
6591                         // We handle the funding disconnection by calling best_block_updated with a height one
6592                         // below where our funding was connected, implying a reorg back to conf_height - 1.
6593                         let reorg_height = self.context.funding_tx_confirmation_height - 1;
6594                         // We use the time field to bump the current time we set on channel updates if it's
6595                         // larger. If we don't know that time has moved forward, we can just set it to the last
6596                         // time we saw and it will be ignored.
6597                         let best_time = self.context.update_time_counter;
6598                         match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
6599                                 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
6600                                         assert!(channel_ready.is_none(), "We can't generate a channel_ready with 0 confirmations?");
6601                                         assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
6602                                         assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
6603                                         Ok(())
6604                                 },
6605                                 Err(e) => Err(e)
6606                         }
6607                 } else {
6608                         // We never learned about the funding confirmation anyway, just ignore
6609                         Ok(())
6610                 }
6611         }
6612
6613         // Methods to get unprompted messages to send to the remote end (or where we already returned
6614         // something in the handler for the message that prompted this message):
6615
6616         /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
6617         /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
6618         /// directions). Should be used for both broadcasted announcements and in response to an
6619         /// AnnouncementSignatures message from the remote peer.
6620         ///
6621         /// Will only fail if we're not in a state where channel_announcement may be sent (including
6622         /// closing).
6623         ///
6624         /// This will only return ChannelError::Ignore upon failure.
6625         ///
6626         /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
6627         fn get_channel_announcement<NS: Deref>(
6628                 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6629         ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6630                 if !self.context.config.announced_channel {
6631                         return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
6632                 }
6633                 if !self.context.is_usable() {
6634                         return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
6635                 }
6636
6637                 let short_channel_id = self.context.get_short_channel_id()
6638                         .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
6639                 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6640                         .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
6641                 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
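                     // Per BOLT #7, node_id_1 is the numerically-lesser of the two node ids, and the
                     // bitcoin (funding) keys must be listed in the matching order.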
6642                 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
6643
6644                 let msg = msgs::UnsignedChannelAnnouncement {
6645                         features: channelmanager::provided_channel_features(&user_config),
6646                         chain_hash,
6647                         short_channel_id,
6648                         node_id_1: if were_node_one { node_id } else { counterparty_node_id },
6649                         node_id_2: if were_node_one { counterparty_node_id } else { node_id },
6650                         bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
6651                         bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
6652                         excess_data: Vec::new(),
6653                 };
6654
6655                 Ok(msg)
6656         }
6657
6658         fn get_announcement_sigs<NS: Deref, L: Deref>(
6659                 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6660                 best_block_height: u32, logger: &L
6661         ) -> Option<msgs::AnnouncementSignatures>
6662         where
6663                 NS::Target: NodeSigner,
6664                 L::Target: Logger
6665         {
6666                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6667                         return None;
6668                 }
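                // Worked example: if the funding tx confirmed at height 100, then at
                // best_block_height 105 it has six confirmations and 100 + 5 > 105 is false,
                // so we fall through and may build announcement_signatures.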
6669
6670                 if !self.context.is_usable() {
6671                         return None;
6672                 }
6673
6674                 if self.context.channel_state.is_peer_disconnected() {
6675                         log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
6676                         return None;
6677                 }
6678
6679                 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
6680                         return None;
6681                 }
6682
6683                 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
6684                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6685                         Ok(a) => a,
6686                         Err(e) => {
6687                                 log_trace!(logger, "{:?}", e);
6688                                 return None;
6689                         }
6690                 };
6691                 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
6692                         Err(_) => {
6693                                 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
6694                                 return None;
6695                         },
6696                         Ok(v) => v
6697                 };
6698                 match &self.context.holder_signer {
6699                         ChannelSignerType::Ecdsa(ecdsa) => {
6700                                 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
6701                                         Err(_) => {
6702                                                 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
6703                                                 return None;
6704                                         },
6705                                         Ok(v) => v
6706                                 };
6707                                 let short_channel_id = match self.context.get_short_channel_id() {
6708                                         Some(scid) => scid,
6709                                         None => return None,
6710                                 };
6711
6712                                 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
6713
6714                                 Some(msgs::AnnouncementSignatures {
6715                                         channel_id: self.context.channel_id(),
6716                                         short_channel_id,
6717                                         node_signature: our_node_sig,
6718                                         bitcoin_signature: our_bitcoin_sig,
6719                                 })
6720                         },
6721                         // TODO (taproot|arik)
6722                         #[cfg(taproot)]
6723                         _ => todo!()
6724                 }
6725         }
6726
6727         /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
6728         /// available.
6729         fn sign_channel_announcement<NS: Deref>(
6730                 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
6731         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6732                 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
6733                         let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6734                                 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
6735                         let were_node_one = announcement.node_id_1 == our_node_key;
6736
6737                         let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
6738                                 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
6739                         match &self.context.holder_signer {
6740                                 ChannelSignerType::Ecdsa(ecdsa) => {
6741                                         let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
6742                                                 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
6743                                         Ok(msgs::ChannelAnnouncement {
6744                                                 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
6745                                                 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
6746                                                 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
6747                                                 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
6748                                                 contents: announcement,
6749                                         })
6750                                 },
6751                                 // TODO (taproot|arik)
6752                                 #[cfg(taproot)]
6753                                 _ => todo!()
6754                         }
6755                 } else {
6756                         Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
6757                 }
6758         }
6759
6760         /// Processes an incoming announcement_signatures message, providing a fully-signed
6761         /// channel_announcement message which we can broadcast and storing our counterparty's
6762         /// signatures for later reconstruction/rebroadcast of the channel_announcement.
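        ///
        /// A sketch of the expected handling (surrounding identifiers assumed in scope; not a
        /// compiling doctest):
        ///
        /// ```ignore
        /// let chan_announcement = channel.announcement_signatures(
        ///     &node_signer, chain_hash, best_block_height, &msg, &user_config,
        /// )?;
        /// // Broadcast `chan_announcement` to the gossip network.
        /// ```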
6763         pub fn announcement_signatures<NS: Deref>(
6764                 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
6765                 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
6766         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6767                 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
6768
6769                 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
6770
6771                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
6772                         return Err(ChannelError::Close(format!(
6773                                 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
6774                                  &announcement, self.context.get_counterparty_node_id())));
6775                 }
6776                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
6777                         return Err(ChannelError::Close(format!(
6778                                 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
6779                                 &announcement, self.context.counterparty_funding_pubkey())));
6780                 }
6781
6782                 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
6783                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6784                         return Err(ChannelError::Ignore(
6785                                 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
6786                 }
6787
6788                 self.sign_channel_announcement(node_signer, announcement)
6789         }
6790
6791         /// Gets a signed channel_announcement for this channel, if we previously received an
6792         /// announcement_signatures from our counterparty.
6793         pub fn get_signed_channel_announcement<NS: Deref>(
6794                 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
6795         ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
6796                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6797                         return None;
6798                 }
6799                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6800                         Ok(res) => res,
6801                         Err(_) => return None,
6802                 };
6803                 match self.sign_channel_announcement(node_signer, announcement) {
6804                         Ok(res) => Some(res),
6805                         Err(_) => None,
6806                 }
6807         }
6808
6809         /// May panic if called on a channel that wasn't immediately-previously
6810         /// self.remove_uncommitted_htlcs_and_mark_paused()'d
6811         pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
6812                 assert!(self.context.channel_state.is_peer_disconnected());
6813                 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
6814                 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
6815                 // current to_remote balances. However, it no longer has any use, and thus is now simply
6816                 // set to a dummy (but valid, as required by the spec) public key.
6817                 // fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
6818                 // branches, but we unwrap it below, so we arbitrarily select a dummy pubkey which is both
6819                 // valid, and valid in fuzzing mode's arbitrary validity criteria:
6820                 let mut pk = [2; 33]; pk[1] = 0xff;
6821                 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
6822                 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
6823                         let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
6824                         log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
6825                         remote_last_secret
6826                 } else {
6827                         log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
6828                         [0;32]
6829                 };
6830                 self.mark_awaiting_response();
6831                 msgs::ChannelReestablish {
6832                         channel_id: self.context.channel_id(),
6833                         // The protocol has two different commitment number concepts - the "commitment
6834                         // transaction number", which starts from 0 and counts up, and the "revocation key
6835                         // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
6836                         // commitment transaction numbers by the index which will be used to reveal the
6837                         // revocation key for that commitment transaction, which means we have to convert them
6838                         // to protocol-level commitment numbers here...
6839
6840                         // next_local_commitment_number is the next commitment_signed number we expect to
6841                         // receive (indicating if they need to resend one that we missed).
6842                         next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
6843                         // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
6844                         // receive, however we track it by the next commitment number for a remote transaction
6845                         // (which is one further, as they always revoke previous commitment transaction, not
6846                         // the one we send) so we have to decrement by 1. Note that if
6847                         // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
6848                         // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
6849                         // overflow here.
6850                         next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
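                        // Worked example: INITIAL_COMMITMENT_NUMBER is 2^48 - 1, so after the
                        // channel has advanced by one holder commitment,
                        // cur_holder_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER - 1
                        // and next_local_commitment_number above evaluates to 1.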
6851                         your_last_per_commitment_secret: remote_last_secret,
6852                         my_current_per_commitment_point: dummy_pubkey,
6853                         // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
6854                         // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
6855                         // txid of that interactive transaction, else we MUST NOT set it.
6856                         next_funding_txid: None,
6857                 }
6858         }
6859
6860
6861         // Send stuff to our remote peers:
6862
6863         /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
6864         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
6865         /// commitment update.
6866         ///
6867         /// `Err`s will only be [`ChannelError::Ignore`].
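        ///
        /// A sketch of the intended sequence (identifiers assumed in scope; not a compiling
        /// doctest):
        ///
        /// ```ignore
        /// // Queue the HTLC; no update_add_htlc message is generated yet.
        /// channel.queue_add_htlc(amount_msat, payment_hash, cltv_expiry, source,
        ///     onion_routing_packet, None, None, &fee_estimator, &logger)?;
        /// // Later, free the holding cell to actually generate the commitment update.
        /// channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
        /// ```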
6868         pub fn queue_add_htlc<F: Deref, L: Deref>(
6869                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6870                 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6871                 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6872         ) -> Result<(), ChannelError>
6873         where F::Target: FeeEstimator, L::Target: Logger
6874         {
6875                 self
6876                         .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
6877                                 skimmed_fee_msat, blinding_point, fee_estimator, logger)
6878                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
6879                         .map_err(|err| {
6880                                 if let ChannelError::Ignore(_) = err { /* fine */ }
6881                                 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
6882                                 err
6883                         })
6884         }
6885
6886         /// Adds a pending outbound HTLC to this channel. Note that you probably want
6887         /// [`Self::send_htlc_and_commit`] instead, as you'll usually want both messages at once.
6888         ///
6889         /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
6890         /// the wire:
6891         /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
6892         ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
6893         ///   awaiting ACK.
6894         /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
6895         ///   we may not yet have sent the previous commitment update messages and will need to
6896         ///   regenerate them.
6897         ///
6898         /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
6899         /// on this [`Channel`] if `force_holding_cell` is false.
6900         ///
6901         /// `Err`s will only be [`ChannelError::Ignore`].
6902         fn send_htlc<F: Deref, L: Deref>(
6903                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6904                 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
6905                 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
6906                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6907         ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
6908         where F::Target: FeeEstimator, L::Target: Logger
6909         {
6910                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6911                         self.context.channel_state.is_local_shutdown_sent() ||
6912                         self.context.channel_state.is_remote_shutdown_sent()
6913                 {
6914                         return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
6915                 }
6916                 let channel_total_msat = self.context.channel_value_satoshis * 1000;
6917                 if amount_msat > channel_total_msat {
6918                         return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
6919                 }
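                // e.g. for a 1_000_000 sat channel, channel_total_msat is 1_000_000_000 msat and
                // any single HTLC larger than that is rejected here outright.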
6920
6921                 if amount_msat == 0 {
6922                         return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
6923                 }
6924
6925                 let available_balances = self.context.get_available_balances(fee_estimator);
6926                 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
6927                         return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
6928                                 available_balances.next_outbound_htlc_minimum_msat)));
6929                 }
6930
6931                 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
6932                         return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
6933                                 available_balances.next_outbound_htlc_limit_msat)));
6934                 }
6935
6936                 if self.context.channel_state.is_peer_disconnected() {
6937                         // Note that this should never really happen: receipt of an incoming HTLC for relay
6938                         // while we're !is_live() will result in us rejecting the HTLC, and we won't allow
6939                         // the user to send directly into a !is_live() channel. However, if we
6940                         // disconnected while the previous hop was doing the commitment dance we may
6941                         // end up getting here after the forwarding delay. In any case, returning an
6942                         // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
6943                         return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
6944                 }
6945
6946                 let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
6947                 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
6948                         payment_hash, amount_msat,
6949                         if force_holding_cell { "into holding cell" }
6950                         else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
6951                         else { "to peer" });
6952
6953                 if need_holding_cell {
6954                         force_holding_cell = true;
6955                 }
6956
6957                 // Now update local state:
6958                 if force_holding_cell {
6959                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
6960                                 amount_msat,
6961                                 payment_hash,
6962                                 cltv_expiry,
6963                                 source,
6964                                 onion_routing_packet,
6965                                 skimmed_fee_msat,
6966                                 blinding_point,
6967                         });
6968                         return Ok(None);
6969                 }
6970
6971                 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
6972                         htlc_id: self.context.next_holder_htlc_id,
6973                         amount_msat,
6974                         payment_hash: payment_hash.clone(),
6975                         cltv_expiry,
6976                         state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
6977                         source,
6978                         blinding_point,
6979                         skimmed_fee_msat,
6980                 });
6981
6982                 let res = msgs::UpdateAddHTLC {
6983                         channel_id: self.context.channel_id,
6984                         htlc_id: self.context.next_holder_htlc_id,
6985                         amount_msat,
6986                         payment_hash,
6987                         cltv_expiry,
6988                         onion_routing_packet,
6989                         skimmed_fee_msat,
6990                         blinding_point,
6991                 };
6992                 self.context.next_holder_htlc_id += 1;
6993
6994                 Ok(Some(res))
6995         }
6996
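        /// Builds a new commitment transaction for our counterparty, promoting the state of any
        /// HTLCs and fee updates which were waiting on this commitment, and returns the
        /// [`ChannelMonitorUpdate`] describing the new counterparty commitment state.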
6997         fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
6998                 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
6999                 // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we
7000                 // fail to generate this, we still are at least at a position where upgrading their status
7001                 // is acceptable.
7002                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
7003                         let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
7004                                 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
7005                         } else { None };
7006                         if let Some(state) = new_state {
7007                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
7008                                 htlc.state = state;
7009                         }
7010                 }
7011                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
7012                         if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
7013                                 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
7014                                 // Grab the preimage, if it exists, instead of cloning
7015                                 let mut reason = OutboundHTLCOutcome::Success(None);
7016                                 mem::swap(outcome, &mut reason);
7017                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
7018                         }
7019                 }
7020                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
7021                         if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
7022                                 debug_assert!(!self.context.is_outbound());
7023                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
7024                                 self.context.feerate_per_kw = feerate;
7025                                 self.context.pending_update_fee = None;
7026                         }
7027                 }
7028                 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
7029
7030                 let (mut htlcs_ref, counterparty_commitment_tx) =
7031                         self.build_commitment_no_state_update(logger);
7032                 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
7033                 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
7034                         htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
7035
7036                 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
7037                         self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
7038                 }
7039
7040                 self.context.latest_monitor_update_id += 1;
7041                 let monitor_update = ChannelMonitorUpdate {
7042                         update_id: self.context.latest_monitor_update_id,
7043                         counterparty_node_id: Some(self.context.counterparty_node_id),
7044                         updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
7045                                 commitment_txid: counterparty_commitment_txid,
7046                                 htlc_outputs: htlcs.clone(),
7047                                 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
7048                                 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
7049                                 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
7050                                 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
7051                                 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
7052                         }],
7053                         channel_id: Some(self.context.channel_id()),
7054                 };
7055                 self.context.channel_state.set_awaiting_remote_revoke();
7056                 monitor_update
7057         }
7058
7059         fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
7060         -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
7061         where L::Target: Logger
7062         {
7063                 let counterparty_keys = self.context.build_remote_transaction_keys();
7064                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
7065                 let counterparty_commitment_tx = commitment_stats.tx;
7066
7067                 #[cfg(any(test, fuzzing))]
7068                 {
7069                         if !self.context.is_outbound() {
7070                                 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
7071                                 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
7072                                 if let Some(info) = projected_commit_tx_info {
7073                                         let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
7074                                         if info.total_pending_htlcs == total_pending_htlcs
7075                                                 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
7076                                                 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
7077                                                 && info.feerate == self.context.feerate_per_kw {
7078                                                         let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
7079                                                         assert_eq!(actual_fee, info.fee);
7080                                                 }
7081                                 }
7082                         }
7083                 }
7084
7085                 (commitment_stats.htlcs_included, counterparty_commitment_tx)
7086         }
7087
7088         /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
7089         /// generation when we shouldn't change HTLC/channel state.
7090         fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
7091                 // Get the fee tests from `build_commitment_no_state_update`
7092                 #[cfg(any(test, fuzzing))]
7093                 self.build_commitment_no_state_update(logger);
7094
7095                 let counterparty_keys = self.context.build_remote_transaction_keys();
7096                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
7097                 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
7098
7099                 match &self.context.holder_signer {
7100                         ChannelSignerType::Ecdsa(ecdsa) => {
7101                                 let (signature, htlc_signatures);
7102
7103                                 {
7104                                         let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
7105                                         for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
7106                                                 htlcs.push(htlc);
7107                                         }
7108
7109                                         let res = ecdsa.sign_counterparty_commitment(
7110                                                         &commitment_stats.tx,
7111                                                         commitment_stats.inbound_htlc_preimages,
7112                                                         commitment_stats.outbound_htlc_preimages,
7113                                                         &self.context.secp_ctx,
7114                                                 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
7115                                         signature = res.0;
7116                                         htlc_signatures = res.1;
7117
7118                                         log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
7119                                                 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
7120                                                 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
7121                                                 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
7122
7123                                         for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
7124                                                 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
7125                                                         encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
7126                                                         encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
7127                                                         log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
7128                                                         log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
7129                                         }
7130                                 }
7131
7132                                 Ok((msgs::CommitmentSigned {
7133                                         channel_id: self.context.channel_id,
7134                                         signature,
7135                                         htlc_signatures,
7136                                         #[cfg(taproot)]
7137                                         partial_signature_with_nonce: None,
7138                                 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
7139                         },
7140                         // TODO (taproot|arik)
7141                         #[cfg(taproot)]
7142                         _ => todo!()
7143                 }
7144         }
7145
7146         /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
7147         /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
7148         ///
7149         /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
7150         /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
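        ///
        /// A sketch of the call pattern (identifiers assumed in scope; not a compiling doctest):
        ///
        /// ```ignore
        /// if let Some(monitor_update) = channel.send_htlc_and_commit(
        ///     amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet,
        ///     None, &fee_estimator, &logger,
        /// )? {
        ///     // Persist `monitor_update` before releasing the commitment_signed it describes.
        /// }
        /// ```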
7151         pub fn send_htlc_and_commit<F: Deref, L: Deref>(
7152                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
7153                 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
7154                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
7155         ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
7156         where F::Target: FeeEstimator, L::Target: Logger
7157         {
7158                 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
7159                         onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
7160                 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
7161                 match send_res? {
7162                         Some(_) => {
7163                                 let monitor_update = self.build_commitment_no_status_check(logger);
7164                                 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
7165                                 Ok(self.push_ret_blockable_mon_update(monitor_update))
7166                         },
7167                         None => Ok(None)
7168                 }
7169         }
7170
7171         /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
7172         /// happened.
7173         pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
7174                 let new_forwarding_info = Some(CounterpartyForwardingInfo {
7175                         fee_base_msat: msg.contents.fee_base_msat,
7176                         fee_proportional_millionths: msg.contents.fee_proportional_millionths,
7177                         cltv_expiry_delta: msg.contents.cltv_expiry_delta
7178                 });
7179                 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
7180                 if did_change {
7181                         self.context.counterparty_forwarding_info = new_forwarding_info;
7182                 }
7183
7184                 Ok(did_change)
7185         }
7186
7187         /// Begins the shutdown process, getting a message for the remote peer and returning all
7188         /// holding cell HTLCs for payment failure.
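        ///
        /// A sketch of initiating a cooperative close (identifiers assumed in scope; not a
        /// compiling doctest):
        ///
        /// ```ignore
        /// let (shutdown_msg, monitor_update_opt, dropped_htlcs) =
        ///     channel.get_shutdown(&signer_provider, &their_features, None, None)?;
        /// // Send `shutdown_msg` to our peer and fail `dropped_htlcs` backwards.
        /// ```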
7189         pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
7190                 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
7191         -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
7192         {
7193                 for htlc in self.context.pending_outbound_htlcs.iter() {
7194                         if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
7195                                 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
7196                         }
7197                 }
7198                 if self.context.channel_state.is_local_shutdown_sent() {
7199                         return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
7200                 } else if self.context.channel_state.is_remote_shutdown_sent() {
7202                         return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
7203                 }
7204                 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
7205                         return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
7206                 }
7207                 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
7208                 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
7209                         return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
7210                 }
7211
7212                 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
7213                         Some(_) => false,
7214                         None => {
7215                                 // use override shutdown script if provided
7216                                 let shutdown_scriptpubkey = match override_shutdown_script {
7217                                         Some(script) => script,
7218                                         None => {
7219                                                 // otherwise, use the shutdown scriptpubkey provided by the signer
7220                                                 match signer_provider.get_shutdown_scriptpubkey() {
7221                                                         Ok(scriptpubkey) => scriptpubkey,
7222                                                         Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
7223                                                 }
7224                                         },
7225                                 };
7226                                 if !shutdown_scriptpubkey.is_compatible(their_features) {
7227                                         return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
7228                                 }
7229                                 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
7230                                 true
7231                         },
7232                 };
7233
7234                 // From here on out, we may not fail!
7235                 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
7236                 self.context.channel_state.set_local_shutdown_sent();
7237                 self.context.local_initiated_shutdown = Some(());
7238                 self.context.update_time_counter += 1;
7239
7240                 let monitor_update = if update_shutdown_script {
7241                         self.context.latest_monitor_update_id += 1;
7242                         let monitor_update = ChannelMonitorUpdate {
7243                                 update_id: self.context.latest_monitor_update_id,
7244                                 counterparty_node_id: Some(self.context.counterparty_node_id),
7245                                 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
7246                                         scriptpubkey: self.get_closing_scriptpubkey(),
7247                                 }],
7248                                 channel_id: Some(self.context.channel_id()),
7249                         };
7250                         self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
7251                         self.push_ret_blockable_mon_update(monitor_update)
7252                 } else { None };
7253                 let shutdown = msgs::Shutdown {
7254                         channel_id: self.context.channel_id,
7255                         scriptpubkey: self.get_closing_scriptpubkey(),
7256                 };
7257
7258                 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
7259                 // our shutdown until we've committed all of the pending changes.
7260                 self.context.holding_cell_update_fee = None;
7261                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
7262                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
7263                         match htlc_update {
7264                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
7265                                         dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
7266                                         false
7267                                 },
7268                                 _ => true
7269                         }
7270                 });
7271
7272                 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
7273                         "we can't both complete shutdown and return a monitor update");
7274
7275                 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
7276         }
7277
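        /// Returns an iterator over the `(source, payment_hash)` pairs for all outbound HTLCs
        /// pending on this channel, including those still sitting in the holding cell.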
7278         pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
7279                 self.context.holding_cell_htlc_updates.iter()
7280                         .flat_map(|htlc_update| {
7281                                 match htlc_update {
7282                                         HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
7283                                                 => Some((source, payment_hash)),
7284                                         _ => None,
7285                                 }
7286                         })
7287                         .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
7288         }
7289 }
7290
7291 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
7292 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
7293         pub context: ChannelContext<SP>,
7294         pub unfunded_context: UnfundedChannelContext,
7295 }
7296
7297 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
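        /// Creates a new outbound channel in the initial handshake state. A usage sketch
        /// (arguments assumed in scope; not a compiling doctest):
        ///
        /// ```ignore
        /// let chan = OutboundV1Channel::new(
        ///     &fee_estimator, &entropy_source, &signer_provider, counterparty_node_id,
        ///     &their_features, channel_value_satoshis, push_msat, user_id, &config,
        ///     current_chain_height, outbound_scid_alias, None,
        /// )?;
        /// let open_channel_msg = chan.get_open_channel(chain_hash);
        /// ```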
7298         pub fn new<ES: Deref, F: Deref>(
7299                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
7300                 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
7301                 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
7302         ) -> Result<OutboundV1Channel<SP>, APIError>
7303         where ES::Target: EntropySource,
7304               F::Target: FeeEstimator
7305         {
7306                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
7307                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7308                         // Protocol-level safety check; this should never be hit thanks to
7309                         // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
7310                         return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve below \
7311                                 implementation limit dust_limit_satoshis {}", holder_selected_channel_reserve_satoshis) });
7312                 }
7313
7314                 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
7315                 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
7316                 let pubkeys = holder_signer.pubkeys().clone();
7317
7318                 let chan = Self {
7319                         context: ChannelContext::new_for_outbound_channel(
7320                                 fee_estimator,
7321                                 entropy_source,
7322                                 signer_provider,
7323                                 counterparty_node_id,
7324                                 their_features,
7325                                 channel_value_satoshis,
7326                                 push_msat,
7327                                 user_id,
7328                                 config,
7329                                 current_chain_height,
7330                                 outbound_scid_alias,
7331                                 temporary_channel_id,
7332                                 holder_selected_channel_reserve_satoshis,
7333                                 channel_keys_id,
7334                                 holder_signer,
7335                                 pubkeys,
7336                         )?,
7337                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7338                 };
7339                 Ok(chan)
7340         }
7341
7342         /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
7343         fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7344                 let counterparty_keys = self.context.build_remote_transaction_keys();
7345                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7346                 let signature = match &self.context.holder_signer {
7347                         // TODO (taproot|arik): move match into calling method for Taproot
7348                         ChannelSignerType::Ecdsa(ecdsa) => {
7349                                 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
7350                                         .map(|(sig, _)| sig).ok()?
7351                         },
7352                         // TODO (taproot|arik)
7353                         #[cfg(taproot)]
7354                         _ => todo!()
7355                 };
7356
7357                 if self.context.signer_pending_funding {
7358                         log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
7359                         self.context.signer_pending_funding = false;
7360                 }
7361
7362                 Some(msgs::FundingCreated {
7363                         temporary_channel_id: self.context.temporary_channel_id.unwrap(),
7364                         funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
7365                         funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
7366                         signature,
7367                         #[cfg(taproot)]
7368                         partial_signature_with_nonce: None,
7369                         #[cfg(taproot)]
7370                         next_local_nonce: None,
7371                 })
7372         }
7373
7374         /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
7375         /// a funding_created message for the remote peer.
7376         /// Panics if called at some time other than immediately after initial handshake, if called twice,
7377         /// or if called on an inbound channel.
7378         /// Note that channel_id changes during this call!
7379         /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
7380         /// If an Err is returned, it is a ChannelError::Close.
7381         pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
7382         -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
7383                 if !self.context.is_outbound() {
7384                         panic!("Tried to create outbound funding_created message on an inbound channel!");
7385                 }
7386                 if !matches!(
7387                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7388                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7389                 ) {
7390                         panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
7391                 }
7392                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7393                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7394                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7395                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7396                 }
7397
7398                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7399                 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7400
7401                 // Now that we're past error-generating stuff, update our local state:
7402
7403                 self.context.channel_state = ChannelState::FundingNegotiated;
7404                 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7405
7406                 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
7407                 // We can skip this if it is a zero-conf channel.
7408                 if funding_transaction.is_coin_base() &&
7409                         self.context.minimum_depth.unwrap_or(0) > 0 &&
7410                         self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
7411                         self.context.minimum_depth = Some(COINBASE_MATURITY);
7412                 }
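                // e.g. a coinbase funding output with a configured minimum_depth of 1 is bumped
                // to COINBASE_MATURITY (100 blocks), as consensus rules forbid spending a
                // coinbase output before it has matured.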
7413
7414                 self.context.funding_transaction = Some(funding_transaction);
7415                 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
7416
7417                 let funding_created = self.get_funding_created_msg(logger);
7418                 if funding_created.is_none() {
7419                         #[cfg(not(async_signing))] {
7420                                 panic!("Failed to get signature for new funding creation");
7421                         }
7422                         #[cfg(async_signing)] {
7423                                 if !self.context.signer_pending_funding {
7424                                         log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
7425                                         self.context.signer_pending_funding = true;
7426                                 }
7427                         }
7428                 }
7429
7430                 Ok(funding_created)
7431         }
7432
7433         /// If we receive an error message, it may only be a rejection of the channel type we tried,
7434         /// not of our ability to open any channel at all. Thus, on error, we should first call this
7435         /// and see if we get a new `OpenChannel` message, otherwise the channel is failed.
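        ///
        /// A sketch of the retry flow (identifiers assumed in scope; not a compiling doctest):
        ///
        /// ```ignore
        /// match channel.maybe_handle_error_without_close(chain_hash, &fee_estimator) {
        ///     Ok(open_channel_msg) => { /* retry with the downgraded channel features */ },
        ///     Err(()) => { /* nothing left to downgrade; fail the channel */ },
        /// }
        /// ```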
7436         pub(crate) fn maybe_handle_error_without_close<F: Deref>(
7437                 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
7438         ) -> Result<msgs::OpenChannel, ()>
7439         where
7440                 F::Target: FeeEstimator
7441         {
7442                 self.context.maybe_downgrade_channel_features(fee_estimator)?;
7443                 Ok(self.get_open_channel(chain_hash))
7444         }
7445
7446         pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
7447                 if !self.context.is_outbound() {
7448                         panic!("Tried to open a channel for an inbound channel?");
7449                 }
7450                 if self.context.have_received_message() {
7451                         panic!("Cannot generate an open_channel after we've moved forward");
7452                 }
7453
7454                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7455                         panic!("Tried to send an open_channel for a channel that has already advanced");
7456                 }
7457
7458                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7459                 let keys = self.context.get_holder_pubkeys();
7460
7461                 msgs::OpenChannel {
7462                         common_fields: msgs::CommonOpenChannelFields {
7463                                 chain_hash,
7464                                 temporary_channel_id: self.context.channel_id,
7465                                 funding_satoshis: self.context.channel_value_satoshis,
7466                                 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7467                                 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7468                                 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7469                                 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw as u32,
7470                                 to_self_delay: self.context.get_holder_selected_contest_delay(),
7471                                 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7472                                 funding_pubkey: keys.funding_pubkey,
7473                                 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7474                                 payment_basepoint: keys.payment_point,
7475                                 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7476                                 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7477                                 first_per_commitment_point,
7478                                 channel_flags: if self.context.config.announced_channel {1} else {0},
7479                                 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7480                                         Some(script) => script.clone().into_inner(),
7481                                         None => Builder::new().into_script(),
7482                                 }),
7483                                 channel_type: Some(self.context.channel_type.clone()),
7484                         },
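                        // The counterparty starts with everything we aren't keeping for
                        // ourselves: the full channel value less `value_to_self_msat`.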
7485                         push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
7486                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7487                 }
7488         }
7489
7490         // Message handlers
7491         pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
7492                 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
7493
7494                 // Check sanity of message fields:
7495                 if !self.context.is_outbound() {
7496                         return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
7497                 }
7498                 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
7499                         return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
7500                 }
7501                 if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
7502                         return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
7503                 }
7504                 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
7505                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
7506                 }
7507                 if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
7508                         return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
7509                 }
7510                 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
7511                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
7512                                 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
7513                 }
7514                 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
7515                 if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
7516                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
7517                 }
7518                 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
7519                 if msg.common_fields.to_self_delay > max_delay_acceptable {
7520                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
7521                 }
7522                 if msg.common_fields.max_accepted_htlcs < 1 {
7523                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
7524                 }
7525                 if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
7526                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
7527                 }
7528
7529                 // Now check against optional parameters as set by config...
7530                 if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
7531                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
7532                 }
7533                 if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
7534                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
7535                 }
7536                 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
7537                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
7538                 }
7539                 if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
7540                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
7541                 }
7542                 if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7543                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7544                 }
7545                 if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
7546                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
7547                 }
7548                 if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
7549                         return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
7550                 }
7551
7552                 if let Some(ty) = &msg.common_fields.channel_type {
7553                         if *ty != self.context.channel_type {
7554                                 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
7555                         }
7556                 } else if their_features.supports_channel_type() {
7557                         // Assume they've accepted the channel type as they said they understand it.
7558                 } else {
7559                         let channel_type = ChannelTypeFeatures::from_init(&their_features);
7560                         if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7561                                 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7562                         }
7563                         self.context.channel_type = channel_type.clone();
7564                         self.context.channel_transaction_parameters.channel_type_features = channel_type;
7565                 }
7566
7567                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7568                         match &msg.common_fields.shutdown_scriptpubkey {
7569                                 &Some(ref script) => {
7570                                         // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything here.
7571                                         if script.len() == 0 {
7572                                                 None
7573                                         } else {
7574                                                 if !script::is_bolt2_compliant(&script, their_features) {
7575                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
7576                                                 }
7577                                                 Some(script.clone())
7578                                         }
7579                                 },
7580                                 // Peer is signaling upfront_shutdown but didn't opt out with the correct mechanism (i.e. a 0-length script). The peer looks buggy, so we fail the channel.
7581                                 &None => {
7582                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but didn't provide a script. Use a 0-length script to opt out".to_owned()));
7583                                 }
7584                         }
7585                 } else { None };
7586
7587                 self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
7588                 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
7589                 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
7590                 self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
7591                 self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
7592
7593                 if peer_limits.trust_own_funding_0conf {
7594                         self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
7595                 } else {
7596                         self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
7597                 }
7598
7599                 let counterparty_pubkeys = ChannelPublicKeys {
7600                         funding_pubkey: msg.common_fields.funding_pubkey,
7601                         revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7602                         payment_point: msg.common_fields.payment_basepoint,
7603                         delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7604                         htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7605                 };
7606
7607                 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
7608                         selected_contest_delay: msg.common_fields.to_self_delay,
7609                         pubkeys: counterparty_pubkeys,
7610                 });
7611
7612                 self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
7613                 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
7614
7615                 self.context.channel_state = ChannelState::NegotiatingFunding(
7616                         NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7617                 );
7618                 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
7619
7620                 Ok(())
7621         }
7622
7623         /// Handles a funding_signed message from the remote end.
7624         /// If this call is successful, broadcast the funding transaction (and not before!)
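        /// A hedged ordering sketch (illustrative only; `pending_chan`, `msg`, `best_block`,
        /// `signer_provider`, `logger`, `persist_monitor`, `broadcaster` and `funding_tx` are
        /// all assumptions):
        /// ```ignore
        /// let (chan, monitor) = pending_chan
        ///     .funding_signed(&msg, best_block, &signer_provider, &logger)
        ///     .map_err(|(_chan, err)| err)?;
        /// persist_monitor(&monitor)?;
        /// // Only once the monitor is persisted is it safe to broadcast the funding transaction.
        /// broadcaster.broadcast_transactions(&[&funding_tx]);
        /// ```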
7625         pub fn funding_signed<L: Deref>(
7626                 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
7627         ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
7628         where
7629                 L::Target: Logger
7630         {
7631                 if !self.context.is_outbound() {
7632                         return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
7633                 }
7634                 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
7635                         return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
7636                 }
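                // Commitment numbers count down from `INITIAL_COMMITMENT_NUMBER` ((1 << 48) - 1),
                // and `get_min_seen_secret()` starts one above that at `1 << 48`, so together
                // these checks assert that no commitment transitions have happened yet.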
7637                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7638                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7639                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7640                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7641                 }
7642
7643                 let funding_script = self.context.get_funding_redeemscript();
7644
7645                 let counterparty_keys = self.context.build_remote_transaction_keys();
7646                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7647                 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
7648                 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
7649
7650                 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
7651                         &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
7652
7653                 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7654                 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
7655                 {
7656                         let trusted_tx = initial_commitment_tx.trust();
7657                         let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7658                         let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7659                         // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
7660                         if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
7661                                 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
7662                         }
7663                 }
7664
7665                 let holder_commitment_tx = HolderCommitmentTransaction::new(
7666                         initial_commitment_tx,
7667                         msg.signature,
7668                         Vec::new(),
7669                         &self.context.get_holder_pubkeys().funding_pubkey,
7670                         self.context.counterparty_funding_pubkey()
7671                 );
7672
7673                 let validated =
7674                         self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
7675                 if validated.is_err() {
7676                         return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7677                 }
7678
7679                 let funding_redeemscript = self.context.get_funding_redeemscript();
7680                 let funding_txo = self.context.get_funding_txo().unwrap();
7681                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
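                // Per BOLT 3, commitment numbers are obscured by XORing them with the lower 48
                // bits of SHA256(open_payment_basepoint || accept_payment_basepoint).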
7682                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7683                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7684                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7685                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7686                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7687                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
7688                                                           &self.context.destination_script, (funding_txo, funding_txo_script),
7689                                                           &self.context.channel_transaction_parameters,
7690                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
7691                                                           obscure_factor,
7692                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7693                 channel_monitor.provide_initial_counterparty_commitment_tx(
7694                         counterparty_initial_bitcoin_tx.txid, Vec::new(),
7695                         self.context.cur_counterparty_commitment_transaction_number,
7696                         self.context.counterparty_cur_commitment_point.unwrap(),
7697                         counterparty_initial_commitment_tx.feerate_per_kw(),
7698                         counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7699                         counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7700
7701                 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitor(s) yet, so there are no updates to fail!
7702                 if self.context.is_batch_funding() {
7703                         self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
7704                 } else {
7705                         self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7706                 }
7707                 self.context.cur_holder_commitment_transaction_number -= 1;
7708                 self.context.cur_counterparty_commitment_transaction_number -= 1;
7709
7710                 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
7711
7712                 let mut channel = Channel {
7713                         context: self.context,
7714                         #[cfg(any(dual_funding, splicing))]
7715                         dual_funding_channel_context: None,
7716                 };
7717
7718                 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7719                 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7720                 Ok((channel, channel_monitor))
7721         }
7722
7723         /// Indicates that the signer may have some signatures for us, so we should retry if we're
7724         /// blocked.
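        /// A hedged usage sketch (illustrative only; `chan` and `send_funding_created` are
        /// assumptions):
        /// ```ignore
        /// if let Some(funding_created) = chan.signer_maybe_unblocked(&logger) {
        ///     // The asynchronous signer produced the signature we were blocked on.
        ///     send_funding_created(funding_created);
        /// }
        /// ```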
7725         #[cfg(async_signing)]
7726         pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7727                 if self.context.signer_pending_funding && self.context.is_outbound() {
7728                         log_trace!(logger, "Signer unblocked a funding_created");
7729                         self.get_funding_created_msg(logger)
7730                 } else { None }
7731         }
7732 }
7733
7734 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
7735 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
7736         pub context: ChannelContext<SP>,
7737         pub unfunded_context: UnfundedChannelContext,
7738 }
7739
7740 /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
7741 /// [`msgs::CommonOpenChannelFields`].
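/// A hedged usage sketch (illustrative only; `msg`, `their_features` and
/// `our_supported_features` are assumed to come from an incoming `open_channel` and our own
/// config):
/// ```ignore
/// match channel_type_from_open_channel(&msg.common_fields, &their_features, &our_supported_features) {
///     Ok(channel_type) => { /* build the inbound channel with this type */ },
///     Err(ChannelError::Close(err)) => { /* reject the open_channel, sending `err` */ },
///     Err(_) => unreachable!("only Close errors are returned above"),
/// }
/// ```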
7742 pub(super) fn channel_type_from_open_channel(
7743         common_fields: &msgs::CommonOpenChannelFields, their_features: &InitFeatures,
7744         our_supported_features: &ChannelTypeFeatures
7745 ) -> Result<ChannelTypeFeatures, ChannelError> {
7746         if let Some(channel_type) = &common_fields.channel_type {
7747                 if channel_type.supports_any_optional_bits() {
7748                         return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
7749                 }
7750
7751                 // We only support the channel types defined by the `ChannelManager` in
7752                 // `provided_channel_type_features`. The channel type must always support
7753                 // `static_remote_key`.
7754                 if !channel_type.requires_static_remote_key() {
7755                         return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
7756                 }
7757                 // Make sure we support all of the features behind the channel type.
7758                 if !channel_type.is_subset(our_supported_features) {
7759                         return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
7760                 }
7761                 let announced_channel = (common_fields.channel_flags & 1) == 1;
7762                 if channel_type.requires_scid_privacy() && announced_channel {
7763                         return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
7764                 }
7765                 Ok(channel_type.clone())
7766         } else {
7767                 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7768                 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7769                         return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7770                 }
7771                 Ok(channel_type)
7772         }
7773 }
7774
7775 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
7776         /// Creates a new channel from a remote side's request for one.
7777         /// Assumes chain_hash has already been checked and corresponds with what we expect!
7778         pub fn new<ES: Deref, F: Deref, L: Deref>(
7779                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
7780                 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
7781                 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
7782                 current_chain_height: u32, logger: &L, is_0conf: bool,
7783         ) -> Result<InboundV1Channel<SP>, ChannelError>
7784                 where ES::Target: EntropySource,
7785                           F::Target: FeeEstimator,
7786                           L::Target: Logger,
7787         {
7788                 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id));
7789
7790                 // First check the channel type is known, failing before we do anything else if we don't
7791                 // support this channel type.
7792                 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
7793
7794                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config);
7795                 let counterparty_pubkeys = ChannelPublicKeys {
7796                         funding_pubkey: msg.common_fields.funding_pubkey,
7797                         revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7798                         payment_point: msg.common_fields.payment_basepoint,
7799                         delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7800                         htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7801                 };
7802
7803                 let chan = Self {
7804                         context: ChannelContext::new_for_inbound_channel(
7805                                 fee_estimator,
7806                                 entropy_source,
7807                                 signer_provider,
7808                                 counterparty_node_id,
7809                                 their_features,
7810                                 user_id,
7811                                 config,
7812                                 current_chain_height,
7813                                 &&logger,
7814                                 is_0conf,
7815                                 0,
7816
7817                                 counterparty_pubkeys,
7818                                 channel_type,
7819                                 holder_selected_channel_reserve_satoshis,
7820                                 msg.channel_reserve_satoshis,
7821                                 msg.push_msat,
7822                                 msg.common_fields.clone(),
7823                         )?,
7824                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7825                 };
7826                 Ok(chan)
7827         }
7828
7829         /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7830         /// should be sent back to the counterparty node.
7831         ///
7832         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
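        /// A hedged usage sketch (illustrative only; `inbound_chan`, `counterparty_node_id` and
        /// `send_accept_channel` are assumptions):
        /// ```ignore
        /// // Once the user (or config) approves the inbound request, hand the message to the peer.
        /// let accept_msg = inbound_chan.accept_inbound_channel();
        /// send_accept_channel(counterparty_node_id, accept_msg);
        /// ```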
7833         pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7834                 if self.context.is_outbound() {
7835                         panic!("Tried to send accept_channel for an outbound channel?");
7836                 }
7837                 if !matches!(
7838                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7839                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7840                 ) {
7841                         panic!("Tried to send accept_channel after channel had moved forward");
7842                 }
7843                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7844                         panic!("Tried to send an accept_channel for a channel that has already advanced");
7845                 }
7846
7847                 self.generate_accept_channel_message()
7848         }
7849
7850         /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7851         /// inbound channel. If the intention is to accept an inbound channel, use
7852         /// [`InboundV1Channel::accept_inbound_channel`] instead.
7853         ///
7854         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7855         fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7856                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7857                 let keys = self.context.get_holder_pubkeys();
7858
7859                 msgs::AcceptChannel {
7860                         common_fields: msgs::CommonAcceptChannelFields {
7861                                 temporary_channel_id: self.context.channel_id,
7862                                 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7863                                 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7864                                 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7865                                 minimum_depth: self.context.minimum_depth.unwrap(),
7866                                 to_self_delay: self.context.get_holder_selected_contest_delay(),
7867                                 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7868                                 funding_pubkey: keys.funding_pubkey,
7869                                 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7870                                 payment_basepoint: keys.payment_point,
7871                                 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7872                                 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7873                                 first_per_commitment_point,
7874                                 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7875                                         Some(script) => script.clone().into_inner(),
7876                                         None => Builder::new().into_script(),
7877                                 }),
7878                                 channel_type: Some(self.context.channel_type.clone()),
7879                         },
7880                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7881                         #[cfg(taproot)]
7882                         next_local_nonce: None,
7883                 }
7884         }
7885
7886         /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7887         /// inbound channel without accepting it.
7888         ///
7889         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7890         #[cfg(test)]
7891         pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7892                 self.generate_accept_channel_message()
7893         }
7894
7895         fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7896                 let funding_script = self.context.get_funding_redeemscript();
7897
7898                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7899                 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7900                 let trusted_tx = initial_commitment_tx.trust();
7901                 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7902                 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7903                 // They sign the holder commitment transaction...
7904                 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7905                         log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7906                         encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7907                         encode::serialize_hex(&funding_script), &self.context.channel_id());
7908                 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7909
7910                 Ok(initial_commitment_tx)
7911         }
7912
7913         pub fn funding_created<L: Deref>(
7914                 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7915         ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7916         where
7917                 L::Target: Logger
7918         {
7919                 if self.context.is_outbound() {
7920                         return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7921                 }
7922                 if !matches!(
7923                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7924                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7925                 ) {
7926                         // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7927                         // remember the channel, so it's safe to just send an error_message here and drop the
7928                         // channel.
7929                         return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7930                 }
7931                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7932                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7933                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7934                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7935                 }
7936
7937                 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7938                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7939                 // This is an externally observable change before we finish all our checks. In particular,
7940                 // check_funding_created_signature may fail.
7941                 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7942
7943                 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7944                         Ok(res) => res,
7945                         Err(ChannelError::Close(e)) => {
7946                                 self.context.channel_transaction_parameters.funding_outpoint = None;
7947                                 return Err((self, ChannelError::Close(e)));
7948                         },
7949                         Err(e) => {
7950                                 // The only error we know how to handle is ChannelError::Close, so we fall over here
7951                                 // to make sure we don't continue with an inconsistent state.
7952                                 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7953                         }
7954                 };
7955
7956                 let holder_commitment_tx = HolderCommitmentTransaction::new(
7957                         initial_commitment_tx,
7958                         msg.signature,
7959                         Vec::new(),
7960                         &self.context.get_holder_pubkeys().funding_pubkey,
7961                         self.context.counterparty_funding_pubkey()
7962                 );
7963
7964                 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7965                         return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7966                 }
7967
7968                 // Now that we're past error-generating stuff, update our local state:
7969
7970                 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7971                 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7972                 self.context.cur_counterparty_commitment_transaction_number -= 1;
7973                 self.context.cur_holder_commitment_transaction_number -= 1;
7974
7975                 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7976
7977                 let funding_redeemscript = self.context.get_funding_redeemscript();
7978                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7979                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7980                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7981                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7982                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7983                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7984                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
7985                                                           &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7986                                                           &self.context.channel_transaction_parameters,
7987                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
7988                                                           obscure_factor,
7989                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7990                 channel_monitor.provide_initial_counterparty_commitment_tx(
7991                         counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7992                         self.context.cur_counterparty_commitment_transaction_number + 1,
7993                         self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7994                         counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7995                         counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7996
7997                 log_info!(logger, "{} funding_signed for peer for channel {}",
7998                         if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7999
8000                 // Promote the channel to a full-fledged one now that we have updated the state and have a
8001                 // `ChannelMonitor`.
8002                 let mut channel = Channel {
8003                         context: self.context,
8004                         #[cfg(any(dual_funding, splicing))]
8005                         dual_funding_channel_context: None,
8006                 };
8007                 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
8008                 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
8009
8010                 Ok((channel, funding_signed, channel_monitor))
8011         }
8012 }
8013
8014 /// A not-yet-funded outbound (from holder) channel using V2 channel establishment.
8015 #[cfg(any(dual_funding, splicing))]
8016 pub(super) struct OutboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
8017         pub context: ChannelContext<SP>,
8018         pub unfunded_context: UnfundedChannelContext,
8019         #[cfg(any(dual_funding, splicing))]
8020         pub dual_funding_context: DualFundingChannelContext,
8021 }
8022
8023 #[cfg(any(dual_funding, splicing))]
8024 impl<SP: Deref> OutboundV2Channel<SP> where SP::Target: SignerProvider {
8025         pub fn new<ES: Deref, F: Deref>(
8026                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
8027                 counterparty_node_id: PublicKey, their_features: &InitFeatures, funding_satoshis: u64,
8028                 user_id: u128, config: &UserConfig, current_chain_height: u32, outbound_scid_alias: u64,
8029                 funding_confirmation_target: ConfirmationTarget,
8030         ) -> Result<OutboundV2Channel<SP>, APIError>
8031         where ES::Target: EntropySource,
8032               F::Target: FeeEstimator,
8033         {
8034                 let channel_keys_id = signer_provider.generate_channel_keys_id(false, funding_satoshis, user_id);
8035                 let holder_signer = signer_provider.derive_channel_signer(funding_satoshis, channel_keys_id);
8036                 let pubkeys = holder_signer.pubkeys().clone();
8037
8038                 let temporary_channel_id = Some(ChannelId::temporary_v2_from_revocation_basepoint(&pubkeys.revocation_basepoint));
8039
8040                 let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8041                         funding_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
8042
8043                 let funding_feerate_sat_per_1000_weight = fee_estimator.bounded_sat_per_1000_weight(funding_confirmation_target);
8044                 let funding_tx_locktime = current_chain_height;
8045
8046                 let chan = Self {
8047                         context: ChannelContext::new_for_outbound_channel(
8048                                 fee_estimator,
8049                                 entropy_source,
8050                                 signer_provider,
8051                                 counterparty_node_id,
8052                                 their_features,
8053                                 funding_satoshis,
8054                                 0,
8055                                 user_id,
8056                                 config,
8057                                 current_chain_height,
8058                                 outbound_scid_alias,
8059                                 temporary_channel_id,
8060                                 holder_selected_channel_reserve_satoshis,
8061                                 channel_keys_id,
8062                                 holder_signer,
8063                                 pubkeys,
8064                         )?,
8065                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
8066                         dual_funding_context: DualFundingChannelContext {
8067                                 our_funding_satoshis: funding_satoshis,
8068                                 their_funding_satoshis: 0,
8069                                 funding_tx_locktime,
8070                                 funding_feerate_sat_per_1000_weight,
8071                         }
8072                 };
8073                 Ok(chan)
8074         }
8075
8076         /// If we receive an error message, it may only be a rejection of the channel type we tried,
8077         /// not of our ability to open any channel at all. Thus, on error, we should first call this
8078         /// and see if we get a new `OpenChannelV2` message, otherwise the channel is failed.
8079         pub(crate) fn maybe_handle_error_without_close<F: Deref>(
8080                 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
8081         ) -> Result<msgs::OpenChannelV2, ()>
8082         where
8083                 F::Target: FeeEstimator
8084         {
8085                 self.context.maybe_downgrade_channel_features(fee_estimator)?;
8086                 Ok(self.get_open_channel_v2(chain_hash))
8087         }
8088
8089         pub fn get_open_channel_v2(&self, chain_hash: ChainHash) -> msgs::OpenChannelV2 {
8090                 if self.context.have_received_message() {
8091                         debug_assert!(false, "Cannot generate an open_channel2 after we've moved forward");
8092                 }
8093
8094                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8095                         debug_assert!(false, "Tried to send an open_channel2 for a channel that has already advanced");
8096                 }
8097
8098                 let first_per_commitment_point = self.context.holder_signer.as_ref()
8099                         .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number,
8100                                 &self.context.secp_ctx);
8101                 let second_per_commitment_point = self.context.holder_signer.as_ref()
8102                         .get_per_commitment_point(self.context.cur_holder_commitment_transaction_number - 1,
8103                                 &self.context.secp_ctx);
8104                 let keys = self.context.get_holder_pubkeys();
8105
8106                 msgs::OpenChannelV2 {
8107                         common_fields: msgs::CommonOpenChannelFields {
8108                                 chain_hash,
8109                                 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
8110                                 funding_satoshis: self.context.channel_value_satoshis,
8111                                 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
8112                                 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
8113                                 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
8114                                 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
8115                                 to_self_delay: self.context.get_holder_selected_contest_delay(),
8116                                 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
8117                                 funding_pubkey: keys.funding_pubkey,
8118                                 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
8119                                 payment_basepoint: keys.payment_point,
8120                                 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8121                                 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8122                                 first_per_commitment_point,
8123                                 channel_flags: if self.context.config.announced_channel {1} else {0},
8124                                 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8125                                         Some(script) => script.clone().into_inner(),
8126                                         None => Builder::new().into_script(),
8127                                 }),
8128                                 channel_type: Some(self.context.channel_type.clone()),
8129                         },
8130                         funding_feerate_sat_per_1000_weight: self.context.feerate_per_kw,
8131                         second_per_commitment_point,
8132                         locktime: self.dual_funding_context.funding_tx_locktime,
8133                         require_confirmed_inputs: None,
8134                 }
8135         }
8136 }
8137
8138 /// A not-yet-funded inbound (from counterparty) channel using V2 channel establishment.
8139 #[cfg(any(dual_funding, splicing))]
8140 pub(super) struct InboundV2Channel<SP: Deref> where SP::Target: SignerProvider {
8141         pub context: ChannelContext<SP>,
8142         pub unfunded_context: UnfundedChannelContext,
8143         pub dual_funding_context: DualFundingChannelContext,
8144 }
8145
8146 #[cfg(any(dual_funding, splicing))]
8147 impl<SP: Deref> InboundV2Channel<SP> where SP::Target: SignerProvider {
8148         /// Creates a new dual-funded channel from a remote side's request for one.
8149         /// Assumes chain_hash has already been checked and corresponds with what we expect!
8150         pub fn new<ES: Deref, F: Deref, L: Deref>(
8151                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
8152                 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
8153                 their_features: &InitFeatures, msg: &msgs::OpenChannelV2, funding_satoshis: u64, user_id: u128,
8154                 config: &UserConfig, current_chain_height: u32, logger: &L,
8155         ) -> Result<InboundV2Channel<SP>, ChannelError>
8156                 where ES::Target: EntropySource,
8157                           F::Target: FeeEstimator,
8158                           L::Target: Logger,
8159         {
8160                 let channel_value_satoshis = funding_satoshis.saturating_add(msg.common_fields.funding_satoshis);
8161                 let counterparty_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8162                         channel_value_satoshis, msg.common_fields.dust_limit_satoshis);
8163                 let holder_selected_channel_reserve_satoshis = get_v2_channel_reserve_satoshis(
8164                         channel_value_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
8165
8166                 // First check the channel type is known, failing before we do anything else if we don't
8167                 // support this channel type.
8168                 if msg.common_fields.channel_type.is_none() {
8169                         return Err(ChannelError::Close(format!("Rejecting V2 channel {} missing channel_type",
8170                                 msg.common_fields.temporary_channel_id)))
8171                 }
8172                 let channel_type = channel_type_from_open_channel(&msg.common_fields, their_features, our_supported_features)?;
8173
8174                 let counterparty_pubkeys = ChannelPublicKeys {
8175                         funding_pubkey: msg.common_fields.funding_pubkey,
8176                         revocation_basepoint: RevocationBasepoint(msg.common_fields.revocation_basepoint),
8177                         payment_point: msg.common_fields.payment_basepoint,
8178                         delayed_payment_basepoint: DelayedPaymentBasepoint(msg.common_fields.delayed_payment_basepoint),
8179                         htlc_basepoint: HtlcBasepoint(msg.common_fields.htlc_basepoint)
8180                 };
8181
8182                 let mut context = ChannelContext::new_for_inbound_channel(
8183                         fee_estimator,
8184                         entropy_source,
8185                         signer_provider,
8186                         counterparty_node_id,
8187                         their_features,
8188                         user_id,
8189                         config,
8190                         current_chain_height,
8191                         logger,
8192                         false,
8193
8194                         funding_satoshis,
8195
8196                         counterparty_pubkeys,
8197                         channel_type,
8198                         holder_selected_channel_reserve_satoshis,
8199                         counterparty_selected_channel_reserve_satoshis,
8200                         0 /* push_msat not used in dual-funding */,
8201                         msg.common_fields.clone(),
8202                 )?;
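                // Unlike V1 (where the channel ID comes from the funding outpoint), the V2
                // channel ID is derived from both sides' revocation basepoints, so it is known
                // before any funding transaction exists.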
8203                 let channel_id = ChannelId::v2_from_revocation_basepoints(
8204                         &context.get_holder_pubkeys().revocation_basepoint,
8205                         &context.get_counterparty_pubkeys().revocation_basepoint);
8206                 context.channel_id = channel_id;
8207
8208                 let chan = Self {
8209                         context,
8210                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 },
8211                         dual_funding_context: DualFundingChannelContext {
8212                                 our_funding_satoshis: funding_satoshis,
8213                                 their_funding_satoshis: msg.common_fields.funding_satoshis,
8214                                 funding_tx_locktime: msg.locktime,
8215                                 funding_feerate_sat_per_1000_weight: msg.funding_feerate_sat_per_1000_weight,
8216                         }
8217                 };
8218
8219                 Ok(chan)
8220         }
8221
8222         /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannelV2`] message which
8223         /// should be sent back to the counterparty node.
8224         ///
8225         /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8226         pub fn accept_inbound_dual_funded_channel(&mut self) -> msgs::AcceptChannelV2 {
8227                 if self.context.is_outbound() {
8228                         debug_assert!(false, "Tried to send accept_channel for an outbound channel?");
8229                 }
8230                 if !matches!(
8231                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
8232                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
8233                 ) {
8234                         debug_assert!(false, "Tried to send accept_channel2 after channel had moved forward");
8235                 }
8236                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
8237                         debug_assert!(false, "Tried to send an accept_channel2 for a channel that has already advanced");
8238                 }
8239
8240                 self.generate_accept_channel_v2_message()
8241         }
8242
8243         /// This function is used to explicitly generate a [`msgs::AcceptChannelV2`] message for an
8244         /// inbound channel. If the intention is to accept an inbound channel, use
8245         /// [`InboundV2Channel::accept_inbound_dual_funded_channel`] instead.
8246         ///
8247         /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8248         fn generate_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
8249                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8250                         self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
8251                 let second_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(
8252                         self.context.cur_holder_commitment_transaction_number - 1, &self.context.secp_ctx);
8253                 let keys = self.context.get_holder_pubkeys();
8254
8255                 msgs::AcceptChannelV2 {
8256                         common_fields: msgs::CommonAcceptChannelFields {
8257                                 temporary_channel_id: self.context.temporary_channel_id.unwrap(),
8258                                 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
8259                                 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
8260                                 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
8261                                 minimum_depth: self.context.minimum_depth.unwrap(),
8262                                 to_self_delay: self.context.get_holder_selected_contest_delay(),
8263                                 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
8264                                 funding_pubkey: keys.funding_pubkey,
8265                                 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
8266                                 payment_basepoint: keys.payment_point,
8267                                 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
8268                                 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
8269                                 first_per_commitment_point,
8270                                 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
8271                                         Some(script) => script.clone().into_inner(),
8272                                         None => Builder::new().into_script(),
8273                                 }),
8274                                 channel_type: Some(self.context.channel_type.clone()),
8275                         },
8276                         funding_satoshis: self.dual_funding_context.our_funding_satoshis,
8277                         second_per_commitment_point,
8278                         require_confirmed_inputs: None,
8279                 }
8280         }
8281
8282         /// Enables the possibility for tests to extract a [`msgs::AcceptChannelV2`] message for an
8283         /// inbound channel without accepting it.
8284         ///
8285         /// [`msgs::AcceptChannelV2`]: crate::ln::msgs::AcceptChannelV2
8286         #[cfg(test)]
8287         pub fn get_accept_channel_v2_message(&self) -> msgs::AcceptChannelV2 {
8288                 self.generate_accept_channel_v2_message()
8289         }
8290 }
8291
8292 // Unfunded channel utilities
8293
8294 fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
8295         // The default channel type (ie the first one we try) depends on whether the channel is
8296         // public - if it is, we just go with `only_static_remotekey` as it's the only option
8297         // available. If it's private, we first try `scid_privacy` as it provides better privacy
8298         // with no other changes, and fall back to `only_static_remotekey`.
8299         let mut ret = ChannelTypeFeatures::only_static_remote_key();
8300         if !config.channel_handshake_config.announced_channel &&
8301                 config.channel_handshake_config.negotiate_scid_privacy &&
8302                 their_features.supports_scid_privacy() {
8303                 ret.set_scid_privacy_required();
8304         }
8305
8306         // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
8307         // set it now. If they don't understand it, we'll fall back to our default of
8308         // `only_static_remotekey`.
8309         if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
8310                 their_features.supports_anchors_zero_fee_htlc_tx() {
8311                 ret.set_anchors_zero_fee_htlc_tx_required();
8312         }
8313
8314         ret
8315 }
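
// A minimal sketch (test-only, not upstream code) exercising the fallback logic above:
// with a private channel, `negotiate_scid_privacy` set, and a peer advertising support,
// the initial type upgrades from `only_static_remotekey` to include `scid_privacy`.
#[cfg(test)]
fn _initial_channel_type_prefers_scid_privacy() {
	let mut config = UserConfig::default();
	config.channel_handshake_config.announced_channel = false;
	config.channel_handshake_config.negotiate_scid_privacy = true;
	let mut their_features = InitFeatures::empty();
	their_features.set_scid_privacy_optional();
	assert!(get_initial_channel_type(&config, &their_features).supports_scid_privacy());
	// Without peer support we stay on the plain static-remote-key type.
	assert_eq!(get_initial_channel_type(&config, &InitFeatures::empty()),
		ChannelTypeFeatures::only_static_remote_key());
}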
8316
8317 const SERIALIZATION_VERSION: u8 = 4;
8318 const MIN_SERIALIZATION_VERSION: u8 = 3;
8319
8320 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
8321         (0, FailRelay),
8322         (1, FailMalformed),
8323         (2, Fulfill),
8324 );
8325
8326 impl Writeable for ChannelUpdateStatus {
8327         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8328                 // We only care about writing out the current state as it was announced, ie only either
8329                 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
8330                 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
8331                 match self {
8332                         ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
8333                         ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
8334                         ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
8335                         ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
8336                 }
8337                 Ok(())
8338         }
8339 }
8340
8341 impl Readable for ChannelUpdateStatus {
8342         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8343                 Ok(match <u8 as Readable>::read(reader)? {
8344                         0 => ChannelUpdateStatus::Enabled,
8345                         1 => ChannelUpdateStatus::Disabled,
8346                         _ => return Err(DecodeError::InvalidValue),
8347                 })
8348         }
8349 }
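
// A minimal sketch (test-only, not upstream code): round-trips `ChannelUpdateStatus`
// through the one-byte encoding above. A staged state serializes as its last-announced
// state, so `DisabledStaged` (still announced as enabled) reads back as `Enabled`.
// Assumes the `encode` helper provided by `Writeable`.
#[cfg(test)]
fn _channel_update_status_collapses_staged_states() {
	let bytes = ChannelUpdateStatus::DisabledStaged(0).encode();
	let read: ChannelUpdateStatus = Readable::read(&mut &bytes[..]).unwrap();
	assert!(matches!(read, ChannelUpdateStatus::Enabled));
}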
8350
8351 impl Writeable for AnnouncementSigsState {
8352         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8353                 // We only care about writing out the current state as if we had just disconnected, at
8354 		// which point we always set anything but PeerReceived to NotSent.
8355                 match self {
8356                         AnnouncementSigsState::NotSent => 0u8.write(writer),
8357                         AnnouncementSigsState::MessageSent => 0u8.write(writer),
8358                         AnnouncementSigsState::Committed => 0u8.write(writer),
8359                         AnnouncementSigsState::PeerReceived => 1u8.write(writer),
8360                 }
8361         }
8362 }
8363
8364 impl Readable for AnnouncementSigsState {
8365         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
8366                 Ok(match <u8 as Readable>::read(reader)? {
8367                         0 => AnnouncementSigsState::NotSent,
8368                         1 => AnnouncementSigsState::PeerReceived,
8369                         _ => return Err(DecodeError::InvalidValue),
8370                 })
8371         }
8372 }
8373
8374 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
8375         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
8376                 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
8377                 // called.
8378
8379                 let version_to_write = if self.context.pending_inbound_htlcs.iter().any(|htlc| match htlc.state {
8380                         InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution)|
8381                                 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
8382                                 matches!(htlc_resolution, InboundHTLCResolution::Pending { .. })
8383                         },
8384                         _ => false,
8385                 }) {
8386                         SERIALIZATION_VERSION
8387                 } else {
8388                         MIN_SERIALIZATION_VERSION
8389                 };
8390                 write_ver_prefix!(writer, version_to_write, MIN_SERIALIZATION_VERSION);
8391
8392                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8393                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
8394                 // the low bytes now and the optional high bytes later.
8395                 let user_id_low = self.context.user_id as u64;
8396                 user_id_low.write(writer)?;
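		// For example, a `user_id` of ((hi as u128) << 64) | lo is written as `lo` here and
		// as `hi` under odd TLV type 25 below; readers recombine the two halves, while
		// pre-0.0.113 readers simply see `lo`.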
8397
8398                 // Version 1 deserializers expected to read parts of the config object here. Version 2
8399                 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
8400                 // `minimum_depth` we simply write dummy values here.
8401                 writer.write_all(&[0; 8])?;
8402
8403                 self.context.channel_id.write(writer)?;
8404                 {
8405                         let mut channel_state = self.context.channel_state;
8406                         if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
8407                                 channel_state.set_peer_disconnected();
8408                         } else {
8409                                 debug_assert!(false, "Pre-funded/shutdown channels should not be written");
8410                         }
8411                         channel_state.to_u32().write(writer)?;
8412                 }
8413                 self.context.channel_value_satoshis.write(writer)?;
8414
8415                 self.context.latest_monitor_update_id.write(writer)?;
8416
8417                 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
8418                 // deserialized from that format.
8419                 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
8420                         Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
8421                         None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
8422                 }
8423                 self.context.destination_script.write(writer)?;
8424
8425                 self.context.cur_holder_commitment_transaction_number.write(writer)?;
8426                 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
8427                 self.context.value_to_self_msat.write(writer)?;
8428
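		// Inbound HTLCs still in `RemoteAnnounced` were never committed to in a
		// `commitment_signed`, so we drop them here; the counterparty will re-announce
		// them on reconnect. `next_counterparty_htlc_id` is reduced by the same count
		// below to keep the HTLC id sequence consistent.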
8429                 let mut dropped_inbound_htlcs = 0;
8430                 for htlc in self.context.pending_inbound_htlcs.iter() {
8431                         if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
8432                                 dropped_inbound_htlcs += 1;
8433                         }
8434                 }
8435                 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
8436                 for htlc in self.context.pending_inbound_htlcs.iter() {
8437                         if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
8438                                 continue; // Drop
8439                         }
8440                         htlc.htlc_id.write(writer)?;
8441                         htlc.amount_msat.write(writer)?;
8442                         htlc.cltv_expiry.write(writer)?;
8443                         htlc.payment_hash.write(writer)?;
8444                         match &htlc.state {
8445                                 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
8446                                 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_resolution) => {
8447                                         1u8.write(writer)?;
8448                                         if version_to_write <= 3 {
8449                                                 if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
8450                                                         pending_htlc_status.write(writer)?;
8451                                                 } else {
8452                                                         panic!();
8453                                                 }
8454                                         } else {
8455                                                 htlc_resolution.write(writer)?;
8456                                         }
8457                                 },
8458                                 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_resolution) => {
8459                                         2u8.write(writer)?;
8460                                         if version_to_write <= 3 {
8461                                                 if let InboundHTLCResolution::Resolved { pending_htlc_status } = htlc_resolution {
8462                                                         pending_htlc_status.write(writer)?;
8463                                                 } else {
8464                                                         panic!();
8465                                                 }
8466                                         } else {
8467                                                 htlc_resolution.write(writer)?;
8468                                         }
8469                                 },
8470                                 &InboundHTLCState::Committed => {
8471                                         3u8.write(writer)?;
8472                                 },
8473                                 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
8474                                         4u8.write(writer)?;
8475                                         removal_reason.write(writer)?;
8476                                 },
8477                         }
8478                 }
8479
8480                 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
8481                 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
8482                 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8483
8484                 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
8485                 for htlc in self.context.pending_outbound_htlcs.iter() {
8486                         htlc.htlc_id.write(writer)?;
8487                         htlc.amount_msat.write(writer)?;
8488                         htlc.cltv_expiry.write(writer)?;
8489                         htlc.payment_hash.write(writer)?;
8490                         htlc.source.write(writer)?;
8491                         match &htlc.state {
8492                                 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
8493                                         0u8.write(writer)?;
8494                                         onion_packet.write(writer)?;
8495                                 },
8496                                 &OutboundHTLCState::Committed => {
8497                                         1u8.write(writer)?;
8498                                 },
8499                                 &OutboundHTLCState::RemoteRemoved(_) => {
8500                                         // Treat this as a Committed because we haven't received the CS - they'll
8501 					// re-send the claim/fail on reconnect, as well as (hopefully) the missing CS.
8502                                         1u8.write(writer)?;
8503                                 },
8504                                 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
8505                                         3u8.write(writer)?;
8506                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
8507                                                 preimages.push(preimage);
8508                                         }
8509                                         let reason: Option<&HTLCFailReason> = outcome.into();
8510                                         reason.write(writer)?;
8511                                 }
8512                                 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
8513                                         4u8.write(writer)?;
8514                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
8515                                                 preimages.push(preimage);
8516                                         }
8517                                         let reason: Option<&HTLCFailReason> = outcome.into();
8518                                         reason.write(writer)?;
8519                                 }
8520                         }
8521                         pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
8522                         pending_outbound_blinding_points.push(htlc.blinding_point);
8523                 }
8524
8525                 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
8526                 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8527                 // Vec of (htlc_id, failure_code, sha256_of_onion)
8528                 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
8529                 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
8530                 for update in self.context.holding_cell_htlc_updates.iter() {
8531                         match update {
8532                                 &HTLCUpdateAwaitingACK::AddHTLC {
8533                                         ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
8534                                         blinding_point, skimmed_fee_msat,
8535                                 } => {
8536                                         0u8.write(writer)?;
8537                                         amount_msat.write(writer)?;
8538                                         cltv_expiry.write(writer)?;
8539                                         payment_hash.write(writer)?;
8540                                         source.write(writer)?;
8541                                         onion_routing_packet.write(writer)?;
8542
8543                                         holding_cell_skimmed_fees.push(skimmed_fee_msat);
8544                                         holding_cell_blinding_points.push(blinding_point);
8545                                 },
8546                                 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
8547                                         1u8.write(writer)?;
8548                                         payment_preimage.write(writer)?;
8549                                         htlc_id.write(writer)?;
8550                                 },
8551                                 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
8552                                         2u8.write(writer)?;
8553                                         htlc_id.write(writer)?;
8554                                         err_packet.write(writer)?;
8555                                 }
8556                                 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
8557                                         htlc_id, failure_code, sha256_of_onion
8558                                 } => {
8559                                         // We don't want to break downgrading by adding a new variant, so write a dummy
8560                                         // `::FailHTLC` variant and write the real malformed error as an optional TLV.
8561                                         malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
8562
8563                                         let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
8564                                         2u8.write(writer)?;
8565                                         htlc_id.write(writer)?;
8566                                         dummy_err_packet.write(writer)?;
8567                                 }
8568                         }
8569                 }
8570
8571                 match self.context.resend_order {
8572                         RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
8573                         RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
8574                 }
8575
8576                 self.context.monitor_pending_channel_ready.write(writer)?;
8577                 self.context.monitor_pending_revoke_and_ack.write(writer)?;
8578                 self.context.monitor_pending_commitment_signed.write(writer)?;
8579
8580                 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
8581                 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
8582                         pending_forward.write(writer)?;
8583                         htlc_id.write(writer)?;
8584                 }
8585
8586                 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
8587                 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
8588                         htlc_source.write(writer)?;
8589                         payment_hash.write(writer)?;
8590                         fail_reason.write(writer)?;
8591                 }
8592
8593                 if self.context.is_outbound() {
8594                         self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
8595                 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
8596                         Some(feerate).write(writer)?;
8597                 } else {
8598                         // As for inbound HTLCs, if the update was only announced and never committed in a
8599                         // commitment_signed, drop it.
8600                         None::<u32>.write(writer)?;
8601                 }
8602                 self.context.holding_cell_update_fee.write(writer)?;
8603
8604                 self.context.next_holder_htlc_id.write(writer)?;
8605                 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
8606                 self.context.update_time_counter.write(writer)?;
8607                 self.context.feerate_per_kw.write(writer)?;
8608
8609                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
8610                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
8611 		// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
8612                 // consider the stale state on reload.
8613                 0u8.write(writer)?;
8614
8615                 self.context.funding_tx_confirmed_in.write(writer)?;
8616                 self.context.funding_tx_confirmation_height.write(writer)?;
8617                 self.context.short_channel_id.write(writer)?;
8618
8619                 self.context.counterparty_dust_limit_satoshis.write(writer)?;
8620                 self.context.holder_dust_limit_satoshis.write(writer)?;
8621                 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
8622
8623                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8624                 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
8625
8626                 self.context.counterparty_htlc_minimum_msat.write(writer)?;
8627                 self.context.holder_htlc_minimum_msat.write(writer)?;
8628                 self.context.counterparty_max_accepted_htlcs.write(writer)?;
8629
8630                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8631                 self.context.minimum_depth.unwrap_or(0).write(writer)?;
8632
8633                 match &self.context.counterparty_forwarding_info {
8634                         Some(info) => {
8635                                 1u8.write(writer)?;
8636                                 info.fee_base_msat.write(writer)?;
8637                                 info.fee_proportional_millionths.write(writer)?;
8638                                 info.cltv_expiry_delta.write(writer)?;
8639                         },
8640                         None => 0u8.write(writer)?
8641                 }
8642
8643                 self.context.channel_transaction_parameters.write(writer)?;
8644                 self.context.funding_transaction.write(writer)?;
8645
8646                 self.context.counterparty_cur_commitment_point.write(writer)?;
8647                 self.context.counterparty_prev_commitment_point.write(writer)?;
8648                 self.context.counterparty_node_id.write(writer)?;
8649
8650                 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
8651
8652                 self.context.commitment_secrets.write(writer)?;
8653
8654                 self.context.channel_update_status.write(writer)?;
8655
8656                 #[cfg(any(test, fuzzing))]
8657                 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
8658                 #[cfg(any(test, fuzzing))]
8659                 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
8660                         htlc.write(writer)?;
8661                 }
8662
8663                 // If the channel type is something other than only-static-remote-key, then we need to have
8664                 // older clients fail to deserialize this channel at all. If the type is
8665                 // only-static-remote-key, we simply consider it "default" and don't write the channel type
8666                 // out at all.
8667                 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
8668                         Some(&self.context.channel_type) } else { None };
8669
8670                 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
8671 		// the default, and when `holder_max_htlc_value_in_flight_msat` is configured to
8672 		// a different percentage of the channel value than 10%, which older versions of LDK
8673 		// used before the percentage was made configurable.
8674                 let serialized_holder_selected_reserve =
8675                         if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
8676                         { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
8677
8678                 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
8679                 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
8680                 let serialized_holder_htlc_max_in_flight =
8681                         if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
8682                         { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
8683
8684                 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
8685                 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
8686
8687                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8688                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
8689                 // we write the high bytes as an option here.
8690                 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
8691
8692                 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
8693
8694                 let mut monitor_pending_update_adds = None;
8695                 if !self.context.monitor_pending_update_adds.is_empty() {
8696                         monitor_pending_update_adds = Some(&self.context.monitor_pending_update_adds);
8697                 }
8698
8699                 write_tlv_fields!(writer, {
8700                         (0, self.context.announcement_sigs, option),
8701                         // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
8702                         // default value instead of being Option<>al. Thus, to maintain compatibility we write
8703                         // them twice, once with their original default values above, and once as an option
8704                         // here. On the read side, old versions will simply ignore the odd-type entries here,
8705                         // and new versions map the default values to None and allow the TLV entries here to
8706                         // override that.
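			// (Per the TLV "it's ok to be odd" rule, odd types here are skippable by older
			// readers, while even types such as the channel type at 2 force old readers
			// to fail rather than silently misinterpret the channel.)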
8707                         (1, self.context.minimum_depth, option),
8708                         (2, chan_type, option),
8709                         (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
8710                         (4, serialized_holder_selected_reserve, option),
8711                         (5, self.context.config, required),
8712                         (6, serialized_holder_htlc_max_in_flight, option),
8713                         (7, self.context.shutdown_scriptpubkey, option),
8714                         (8, self.context.blocked_monitor_updates, optional_vec),
8715                         (9, self.context.target_closing_feerate_sats_per_kw, option),
8716                         (10, monitor_pending_update_adds, option), // Added in 0.0.122
8717                         (11, self.context.monitor_pending_finalized_fulfills, required_vec),
8718                         (13, self.context.channel_creation_height, required),
8719                         (15, preimages, required_vec),
8720                         (17, self.context.announcement_sigs_state, required),
8721                         (19, self.context.latest_inbound_scid_alias, option),
8722                         (21, self.context.outbound_scid_alias, required),
8723                         (23, channel_ready_event_emitted, option),
8724                         (25, user_id_high_opt, option),
8725                         (27, self.context.channel_keys_id, required),
8726                         (28, holder_max_accepted_htlcs, option),
8727                         (29, self.context.temporary_channel_id, option),
8728                         (31, channel_pending_event_emitted, option),
8729                         (35, pending_outbound_skimmed_fees, optional_vec),
8730                         (37, holding_cell_skimmed_fees, optional_vec),
8731                         (38, self.context.is_batch_funding, option),
8732                         (39, pending_outbound_blinding_points, optional_vec),
8733                         (41, holding_cell_blinding_points, optional_vec),
8734                         (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8735                         // 45 and 47 are reserved for async signing
8736                         (49, self.context.local_initiated_shutdown, option), // Added in 0.0.122
8737                 });
8738
8739                 Ok(())
8740         }
8741 }
8742
8743 const MAX_ALLOC_SIZE: usize = 64*1024;
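// A minimal sketch (test-only, not upstream code) of the bounded-allocation pattern
// `read` uses below for the legacy signer bytes: the up-front capacity is clamped to
// MAX_ALLOC_SIZE so a corrupted length prefix cannot force a huge allocation, and the
// claimed length is satisfied in 1KB chunks, failing cleanly on truncated input.
#[cfg(test)]
fn _bounded_length_prefixed_read<R: io::Read>(reader: &mut R, claimed_len: u32) -> Result<Vec<u8>, io::Error> {
	let mut data = Vec::with_capacity(cmp::min(claimed_len as usize, MAX_ALLOC_SIZE));
	let mut buf = [0u8; 1024];
	while data.len() != claimed_len as usize {
		let to_read = cmp::min(buf.len(), claimed_len as usize - data.len());
		reader.read_exact(&mut buf[..to_read])?;
		data.extend_from_slice(&buf[..to_read]);
	}
	Ok(data)
}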
8744 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
8745                 where
8746                         ES::Target: EntropySource,
8747                         SP::Target: SignerProvider
8748 {
8749         fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
8750                 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
8751                 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
8752
8753                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8754                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
8755                 // the low bytes now and the high bytes later.
8756                 let user_id_low: u64 = Readable::read(reader)?;
8757
8758                 let mut config = Some(LegacyChannelConfig::default());
8759                 if ver == 1 {
8760                         // Read the old serialization of the ChannelConfig from version 0.0.98.
8761                         config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
8762                         config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
8763                         config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
8764                         config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
8765                 } else {
8766                         // Read the 8 bytes of backwards-compatibility ChannelConfig data.
8767                         let mut _val: u64 = Readable::read(reader)?;
8768                 }
8769
8770                 let channel_id = Readable::read(reader)?;
8771                 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
8772                 let channel_value_satoshis = Readable::read(reader)?;
8773
8774                 let latest_monitor_update_id = Readable::read(reader)?;
8775
8776                 let mut keys_data = None;
8777                 if ver <= 2 {
8778 			// Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
8779                         // the `channel_keys_id` TLV is present below.
8780                         let keys_len: u32 = Readable::read(reader)?;
8781                         keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
8782                         while keys_data.as_ref().unwrap().len() != keys_len as usize {
8783                                 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
8784                                 let mut data = [0; 1024];
8785                                 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
8786                                 reader.read_exact(read_slice)?;
8787                                 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
8788                         }
8789                 }
8790
8791                 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
8792                 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
8793                         Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
8794                         Err(_) => None,
8795                 };
8796                 let destination_script = Readable::read(reader)?;
8797
8798                 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
8799                 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
8800                 let value_to_self_msat = Readable::read(reader)?;
8801
8802                 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
8803
8804                 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8805                 for _ in 0..pending_inbound_htlc_count {
8806                         pending_inbound_htlcs.push(InboundHTLCOutput {
8807                                 htlc_id: Readable::read(reader)?,
8808                                 amount_msat: Readable::read(reader)?,
8809                                 cltv_expiry: Readable::read(reader)?,
8810                                 payment_hash: Readable::read(reader)?,
8811                                 state: match <u8 as Readable>::read(reader)? {
8812                                         1 => {
8813                                                 let resolution = if ver <= 3 {
8814                                                         InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
8815                                                 } else {
8816                                                         Readable::read(reader)?
8817                                                 };
8818                                                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(resolution)
8819                                         },
8820                                         2 => {
8821                                                 let resolution = if ver <= 3 {
8822                                                         InboundHTLCResolution::Resolved { pending_htlc_status: Readable::read(reader)? }
8823                                                 } else {
8824                                                         Readable::read(reader)?
8825                                                 };
8826                                                 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(resolution)
8827                                         },
8828                                         3 => InboundHTLCState::Committed,
8829                                         4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
8830                                         _ => return Err(DecodeError::InvalidValue),
8831                                 },
8832                         });
8833                 }
8834
8835                 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
8836                 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8837                 for _ in 0..pending_outbound_htlc_count {
8838                         pending_outbound_htlcs.push(OutboundHTLCOutput {
8839                                 htlc_id: Readable::read(reader)?,
8840                                 amount_msat: Readable::read(reader)?,
8841                                 cltv_expiry: Readable::read(reader)?,
8842                                 payment_hash: Readable::read(reader)?,
8843                                 source: Readable::read(reader)?,
8844                                 state: match <u8 as Readable>::read(reader)? {
8845                                         0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
8846                                         1 => OutboundHTLCState::Committed,
8847                                         2 => {
8848                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8849                                                 OutboundHTLCState::RemoteRemoved(option.into())
8850                                         },
8851                                         3 => {
8852                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8853                                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
8854                                         },
8855                                         4 => {
8856                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8857                                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
8858                                         },
8859                                         _ => return Err(DecodeError::InvalidValue),
8860                                 },
8861                                 skimmed_fee_msat: None,
8862                                 blinding_point: None,
8863                         });
8864                 }
8865
8866                 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
8867                 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
8868                 for _ in 0..holding_cell_htlc_update_count {
8869                         holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
8870                                 0 => HTLCUpdateAwaitingACK::AddHTLC {
8871                                         amount_msat: Readable::read(reader)?,
8872                                         cltv_expiry: Readable::read(reader)?,
8873                                         payment_hash: Readable::read(reader)?,
8874                                         source: Readable::read(reader)?,
8875                                         onion_routing_packet: Readable::read(reader)?,
8876                                         skimmed_fee_msat: None,
8877                                         blinding_point: None,
8878                                 },
8879                                 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
8880                                         payment_preimage: Readable::read(reader)?,
8881                                         htlc_id: Readable::read(reader)?,
8882                                 },
8883                                 2 => HTLCUpdateAwaitingACK::FailHTLC {
8884                                         htlc_id: Readable::read(reader)?,
8885                                         err_packet: Readable::read(reader)?,
8886                                 },
8887                                 _ => return Err(DecodeError::InvalidValue),
8888                         });
8889                 }
8890
8891                 let resend_order = match <u8 as Readable>::read(reader)? {
8892                         0 => RAACommitmentOrder::CommitmentFirst,
8893                         1 => RAACommitmentOrder::RevokeAndACKFirst,
8894                         _ => return Err(DecodeError::InvalidValue),
8895                 };
8896
8897                 let monitor_pending_channel_ready = Readable::read(reader)?;
8898                 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
8899                 let monitor_pending_commitment_signed = Readable::read(reader)?;
8900
8901                 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
8902                 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
8903                 for _ in 0..monitor_pending_forwards_count {
8904                         monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
8905                 }
8906
8907                 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
8908                 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
8909                 for _ in 0..monitor_pending_failures_count {
8910                         monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
8911                 }
8912
8913                 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
8914
8915                 let holding_cell_update_fee = Readable::read(reader)?;
8916
8917                 let next_holder_htlc_id = Readable::read(reader)?;
8918                 let next_counterparty_htlc_id = Readable::read(reader)?;
8919                 let update_time_counter = Readable::read(reader)?;
8920                 let feerate_per_kw = Readable::read(reader)?;
8921
8922                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
8923                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
8924 		// `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
8925                 // consider the stale state on reload.
8926                 match <u8 as Readable>::read(reader)? {
8927                         0 => {},
8928                         1 => {
8929                                 let _: u32 = Readable::read(reader)?;
8930                                 let _: u64 = Readable::read(reader)?;
8931                                 let _: Signature = Readable::read(reader)?;
8932                         },
8933                         _ => return Err(DecodeError::InvalidValue),
8934                 }
8935
8936                 let funding_tx_confirmed_in = Readable::read(reader)?;
8937                 let funding_tx_confirmation_height = Readable::read(reader)?;
8938                 let short_channel_id = Readable::read(reader)?;
8939
8940                 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
8941                 let holder_dust_limit_satoshis = Readable::read(reader)?;
8942                 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
8943                 let mut counterparty_selected_channel_reserve_satoshis = None;
8944                 if ver == 1 {
8945                         // Read the old serialization from version 0.0.98.
8946                         counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
8947                 } else {
8948                         // Read the 8 bytes of backwards-compatibility data.
8949                         let _dummy: u64 = Readable::read(reader)?;
8950                 }
8951                 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
8952                 let holder_htlc_minimum_msat = Readable::read(reader)?;
8953                 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
8954
8955                 let mut minimum_depth = None;
8956                 if ver == 1 {
8957                         // Read the old serialization from version 0.0.98.
8958                         minimum_depth = Some(Readable::read(reader)?);
8959                 } else {
8960                         // Read the 4 bytes of backwards-compatibility data.
8961                         let _dummy: u32 = Readable::read(reader)?;
8962                 }
8963
8964                 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
8965                         0 => None,
8966                         1 => Some(CounterpartyForwardingInfo {
8967                                 fee_base_msat: Readable::read(reader)?,
8968                                 fee_proportional_millionths: Readable::read(reader)?,
8969                                 cltv_expiry_delta: Readable::read(reader)?,
8970                         }),
8971                         _ => return Err(DecodeError::InvalidValue),
8972                 };
8973
8974                 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
8975                 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
8976
8977                 let counterparty_cur_commitment_point = Readable::read(reader)?;
8978
8979                 let counterparty_prev_commitment_point = Readable::read(reader)?;
8980                 let counterparty_node_id = Readable::read(reader)?;
8981
8982                 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
8983                 let commitment_secrets = Readable::read(reader)?;
8984
8985                 let channel_update_status = Readable::read(reader)?;
8986
8987                 #[cfg(any(test, fuzzing))]
8988                 let mut historical_inbound_htlc_fulfills = new_hash_set();
8989                 #[cfg(any(test, fuzzing))]
8990                 {
8991                         let htlc_fulfills_len: u64 = Readable::read(reader)?;
8992                         for _ in 0..htlc_fulfills_len {
8993                                 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
8994                         }
8995                 }
8996
8997                 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
8998                         Some((feerate, if channel_parameters.is_outbound_from_holder {
8999                                 FeeUpdateState::Outbound
9000                         } else {
9001                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
9002                         }))
9003                 } else {
9004                         None
9005                 };
9006
9007                 let mut announcement_sigs = None;
9008                 let mut target_closing_feerate_sats_per_kw = None;
9009                 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
9010                 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
9011                 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
9012                 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
9013                 // only, so we default to that if none was written.
9014                 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
9015                 let mut channel_creation_height = Some(serialized_height);
9016                 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
9017
9018                 // If we read an old Channel, for simplicity we just treat it as "we never sent an
9019                 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
9020                 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
9021                 let mut latest_inbound_scid_alias = None;
9022                 let mut outbound_scid_alias = None;
9023                 let mut channel_pending_event_emitted = None;
9024                 let mut channel_ready_event_emitted = None;
9025
9026                 let mut user_id_high_opt: Option<u64> = None;
9027                 let mut channel_keys_id: Option<[u8; 32]> = None;
9028                 let mut temporary_channel_id: Option<ChannelId> = None;
9029                 let mut holder_max_accepted_htlcs: Option<u16> = None;
9030
9031                 let mut blocked_monitor_updates = Some(Vec::new());
9032
9033                 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
9034                 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
9035
9036                 let mut is_batch_funding: Option<()> = None;
9037
9038                 let mut local_initiated_shutdown: Option<()> = None;
9039
9040                 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
9041                 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
9042
9043                 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
9044                 let mut monitor_pending_update_adds: Option<Vec<msgs::UpdateAddHTLC>> = None;
9045
9046                 read_tlv_fields!(reader, {
9047                         (0, announcement_sigs, option),
9048                         (1, minimum_depth, option),
9049                         (2, channel_type, option),
9050                         (3, counterparty_selected_channel_reserve_satoshis, option),
9051                         (4, holder_selected_channel_reserve_satoshis, option),
9052                         (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
9053                         (6, holder_max_htlc_value_in_flight_msat, option),
9054                         (7, shutdown_scriptpubkey, option),
9055                         (8, blocked_monitor_updates, optional_vec),
9056                         (9, target_closing_feerate_sats_per_kw, option),
9057                         (10, monitor_pending_update_adds, option), // Added in 0.0.122
9058                         (11, monitor_pending_finalized_fulfills, optional_vec),
9059                         (13, channel_creation_height, option),
9060                         (15, preimages_opt, optional_vec),
9061                         (17, announcement_sigs_state, option),
9062                         (19, latest_inbound_scid_alias, option),
9063                         (21, outbound_scid_alias, option),
9064                         (23, channel_ready_event_emitted, option),
9065                         (25, user_id_high_opt, option),
9066                         (27, channel_keys_id, option),
9067                         (28, holder_max_accepted_htlcs, option),
9068                         (29, temporary_channel_id, option),
9069                         (31, channel_pending_event_emitted, option),
9070                         (35, pending_outbound_skimmed_fees_opt, optional_vec),
9071                         (37, holding_cell_skimmed_fees_opt, optional_vec),
9072                         (38, is_batch_funding, option),
9073                         (39, pending_outbound_blinding_points_opt, optional_vec),
9074                         (41, holding_cell_blinding_points_opt, optional_vec),
9075                         (43, malformed_htlcs, optional_vec), // Added in 0.0.119
9076                         // 45 and 47 are reserved for async signing
9077                         (49, local_initiated_shutdown, option),
9078                 });
9079
9080                 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
9081                         let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
9082                         // If we've gotten to the funding stage of the channel, populate the signer with its
9083                         // required channel parameters.
9084                         if channel_state >= ChannelState::FundingNegotiated {
9085                                 holder_signer.provide_channel_parameters(&channel_parameters);
9086                         }
9087                         (channel_keys_id, holder_signer)
9088                 } else {
9089                         // `keys_data` can be `None` if we had corrupted data.
9090                         let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
9091                         let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
9092                         (holder_signer.channel_keys_id(), holder_signer)
9093                 };
9094
9095                 if let Some(preimages) = preimages_opt {
9096                         let mut iter = preimages.into_iter();
9097                         for htlc in pending_outbound_htlcs.iter_mut() {
9098                                 match &htlc.state {
9099                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
9100                                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
9101                                         }
9102                                         OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
9103                                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
9104                                         }
9105                                         _ => {}
9106                                 }
9107                         }
9108                         // We expect all preimages to be consumed above
9109                         if iter.next().is_some() {
9110                                 return Err(DecodeError::InvalidValue);
9111                         }
9112                 }
9113
9114                 let chan_features = channel_type.as_ref().unwrap();
9115                 if !chan_features.is_subset(our_supported_features) {
9116                         // If the channel was written by a new version and negotiated with features we don't
9117                         // understand yet, refuse to read it.
9118                         return Err(DecodeError::UnknownRequiredFeature);
9119                 }
9120
9121                 // ChannelTransactionParameters may have had an empty features set upon deserialization.
9122                 // To account for that, we're proactively setting/overriding the field here.
9123                 channel_parameters.channel_type_features = chan_features.clone();
9124
9125                 let mut secp_ctx = Secp256k1::new();
9126                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
9127
9128                 // `user_id` used to be a single u64 value. In order to remain backwards
9129                 // compatible with versions prior to 0.0.113, the u128 is serialized as two
9130                 // separate u64 values.
9131                 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
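                // Illustrative round trip of that split (values arbitrary):
                //   let id: u128 = (7u128 << 64) | 42;                // high half = 7, low half = 42
                //   let (low, high) = (id as u64, (id >> 64) as u64);
                //   assert_eq!(low as u128 + ((high as u128) << 64), id);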
9132
9133                 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
9134
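                // The four blocks below restore optional per-HTLC fields (skimmed fees and blinding
                // points) that were serialized as positional lists in TLVs 35/37/39/41; each list must
                // be consumed in lockstep with the corresponding HTLC list, so any leftover entry is
                // treated as corrupt data.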
9135                 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
9136                         let mut iter = skimmed_fees.into_iter();
9137                         for htlc in pending_outbound_htlcs.iter_mut() {
9138                                 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
9139                         }
9140                         // We expect all skimmed fees to be consumed above
9141                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9142                 }
9143                 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
9144                         let mut iter = skimmed_fees.into_iter();
9145                         for htlc in holding_cell_htlc_updates.iter_mut() {
9146                                 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
9147                                         *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
9148                                 }
9149                         }
9150                         // We expect all skimmed fees to be consumed above
9151                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9152                 }
9153                 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
9154                         let mut iter = blinding_pts.into_iter();
9155                         for htlc in pending_outbound_htlcs.iter_mut() {
9156                                 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
9157                         }
9158                         // We expect all blinding points to be consumed above
9159                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9160                 }
9161                 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
9162                         let mut iter = blinding_pts.into_iter();
9163                         for htlc in holding_cell_htlc_updates.iter_mut() {
9164                                 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
9165                                         *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
9166                                 }
9167                         }
9168                         // We expect all blinding points to be consumed above
9169                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
9170                 }
9171
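                // Malformed-HTLC failures can't be expressed as a `FailHTLC` (which carries an onion
                // error packet), so they were written out as empty `FailHTLC` holding-cell entries plus
                // a separate `(htlc_id, failure_code, sha256_of_onion)` list in TLV 43; swap the
                // placeholders back to `FailMalformedHTLC` here.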
9172                 if let Some(malformed_htlcs) = malformed_htlcs {
9173                         for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
9174                                 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
9175                                         if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
9176                                                 let matches = *htlc_id == malformed_htlc_id;
9177                                                 if matches { debug_assert!(err_packet.data.is_empty()) }
9178                                                 matches
9179                                         } else { false }
9180                                 }).ok_or(DecodeError::InvalidValue)?;
9181                                 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
9182                                         htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
9183                                 };
9184                                 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
9185                         }
9186                 }
9187
9188                 Ok(Channel {
9189                         context: ChannelContext {
9190                                 user_id,
9191
9192                                 config: config.unwrap(),
9193
9194                                 prev_config: None,
9195
9196                                 // Note that we don't care about serializing handshake limits as we only ever serialize
9197                                 // channel data after the handshake has completed.
9198                                 inbound_handshake_limits_override: None,
9199
9200                                 channel_id,
9201                                 temporary_channel_id,
9202                                 channel_state,
9203                                 announcement_sigs_state: announcement_sigs_state.unwrap(),
9204                                 secp_ctx,
9205                                 channel_value_satoshis,
9206
9207                                 latest_monitor_update_id,
9208
9209                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
9210                                 shutdown_scriptpubkey,
9211                                 destination_script,
9212
9213                                 cur_holder_commitment_transaction_number,
9214                                 cur_counterparty_commitment_transaction_number,
9215                                 value_to_self_msat,
9216
9217                                 holder_max_accepted_htlcs,
9218                                 pending_inbound_htlcs,
9219                                 pending_outbound_htlcs,
9220                                 holding_cell_htlc_updates,
9221
9222                                 resend_order,
9223
9224                                 monitor_pending_channel_ready,
9225                                 monitor_pending_revoke_and_ack,
9226                                 monitor_pending_commitment_signed,
9227                                 monitor_pending_forwards,
9228                                 monitor_pending_failures,
9229                                 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
9230                                 monitor_pending_update_adds: monitor_pending_update_adds.unwrap_or(Vec::new()),
9231
9232                                 signer_pending_commitment_update: false,
9233                                 signer_pending_funding: false,
9234
9235                                 pending_update_fee,
9236                                 holding_cell_update_fee,
9237                                 next_holder_htlc_id,
9238                                 next_counterparty_htlc_id,
9239                                 update_time_counter,
9240                                 feerate_per_kw,
9241
9242                                 #[cfg(debug_assertions)]
9243                                 holder_max_commitment_tx_output: Mutex::new((0, 0)),
9244                                 #[cfg(debug_assertions)]
9245                                 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
9246
9247                                 last_sent_closing_fee: None,
9248                                 pending_counterparty_closing_signed: None,
9249                                 expecting_peer_commitment_signed: false,
9250                                 closing_fee_limits: None,
9251                                 target_closing_feerate_sats_per_kw,
9252
9253                                 funding_tx_confirmed_in,
9254                                 funding_tx_confirmation_height,
9255                                 short_channel_id,
9256                                 channel_creation_height: channel_creation_height.unwrap(),
9257
9258                                 counterparty_dust_limit_satoshis,
9259                                 holder_dust_limit_satoshis,
9260                                 counterparty_max_htlc_value_in_flight_msat,
9261                                 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
9262                                 counterparty_selected_channel_reserve_satoshis,
9263                                 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
9264                                 counterparty_htlc_minimum_msat,
9265                                 holder_htlc_minimum_msat,
9266                                 counterparty_max_accepted_htlcs,
9267                                 minimum_depth,
9268
9269                                 counterparty_forwarding_info,
9270
9271                                 channel_transaction_parameters: channel_parameters,
9272                                 funding_transaction,
9273                                 is_batch_funding,
9274
9275                                 counterparty_cur_commitment_point,
9276                                 counterparty_prev_commitment_point,
9277                                 counterparty_node_id,
9278
9279                                 counterparty_shutdown_scriptpubkey,
9280
9281                                 commitment_secrets,
9282
9283                                 channel_update_status,
9284                                 closing_signed_in_flight: false,
9285
9286                                 announcement_sigs,
9287
9288                                 #[cfg(any(test, fuzzing))]
9289                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
9290                                 #[cfg(any(test, fuzzing))]
9291                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
9292
9293                                 workaround_lnd_bug_4006: None,
9294                                 sent_message_awaiting_response: None,
9295
9296                                 latest_inbound_scid_alias,
9297                                 // Later, in the ChannelManager deserialization phase, we scan for channels and assign an scid alias if it's missing.

9298                                 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
9299
9300                                 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
9301                                 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
9302
9303                                 #[cfg(any(test, fuzzing))]
9304                                 historical_inbound_htlc_fulfills,
9305
9306                                 channel_type: channel_type.unwrap(),
9307                                 channel_keys_id,
9308
9309                                 local_initiated_shutdown,
9310
9311                                 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
9312                         },
9313                         #[cfg(any(dual_funding, splicing))]
9314                         dual_funding_channel_context: None,
9315                 })
9316         }
9317 }
9318
9319 #[cfg(test)]
9320 mod tests {
9321         use std::cmp;
9322         use bitcoin::blockdata::constants::ChainHash;
9323         use bitcoin::blockdata::script::{ScriptBuf, Builder};
9324         use bitcoin::blockdata::transaction::{Transaction, TxOut};
9325         use bitcoin::blockdata::opcodes;
9326         use bitcoin::network::constants::Network;
9327         use crate::ln::onion_utils::INVALID_ONION_BLINDING;
9328         use crate::ln::{PaymentHash, PaymentPreimage};
9329         use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
9330         use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
9331         use crate::ln::channel::InitFeatures;
9332         use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
9333         use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
9334         use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
9335         use crate::ln::msgs;
9336         use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
9337         use crate::ln::script::ShutdownScript;
9338         use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
9339         use crate::chain::BestBlock;
9340         use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
9341         use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
9342         use crate::chain::transaction::OutPoint;
9343         use crate::routing::router::{Path, RouteHop};
9344         use crate::util::config::UserConfig;
9345         use crate::util::errors::APIError;
9346         use crate::util::ser::{ReadableArgs, Writeable};
9347         use crate::util::test_utils;
9348         use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
9349         use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
9350         use bitcoin::secp256k1::ffi::Signature as FFISignature;
9351         use bitcoin::secp256k1::{SecretKey,PublicKey};
9352         use bitcoin::hashes::sha256::Hash as Sha256;
9353         use bitcoin::hashes::Hash;
9354         use bitcoin::hashes::hex::FromHex;
9355         use bitcoin::hash_types::WPubkeyHash;
9356         use bitcoin::blockdata::locktime::absolute::LockTime;
9357         use bitcoin::address::{WitnessProgram, WitnessVersion};
9358         use crate::prelude::*;
9359
9360         #[test]
9361         fn test_channel_state_order() {
9362                 use crate::ln::channel::NegotiatingFundingFlags;
9363                 use crate::ln::channel::AwaitingChannelReadyFlags;
9364                 use crate::ln::channel::ChannelReadyFlags;
9365
9366                 assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
9367                 assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
9368                 assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
9369                 assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
9370         }
9371
9372         struct TestFeeEstimator {
9373                 fee_est: u32
9374         }
9375         impl FeeEstimator for TestFeeEstimator {
9376                 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
9377                         self.fee_est
9378                 }
9379         }
9380
9381         #[test]
9382         fn test_max_funding_satoshis_no_wumbo() {
9383                 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
9384                 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
9385                         "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
9386         }
9387
9388         struct Keys {
9389                 signer: InMemorySigner,
9390         }
9391
9392         impl EntropySource for Keys {
9393                 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
9394         }
9395
9396         impl SignerProvider for Keys {
9397                 type EcdsaSigner = InMemorySigner;
9398                 #[cfg(taproot)]
9399                 type TaprootSigner = InMemorySigner;
9400
9401                 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
9402                         self.signer.channel_keys_id()
9403                 }
9404
9405                 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
9406                         self.signer.clone()
9407                 }
9408
9409                 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
9410
9411                 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
9412                         let secp_ctx = Secp256k1::signing_only();
9413                         let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
9414                         let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
9415                         Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
9416                 }
9417
9418                 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
9419                         let secp_ctx = Secp256k1::signing_only();
9420                         let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
9421                         Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
9422                 }
9423         }
9424
9425         #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
9426         fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
9427                 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
9428         }
9429
9430         #[test]
9431         fn upfront_shutdown_script_incompatibility() {
9432                 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
9433                 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
9434                         &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
9435                 ).unwrap();
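                // Without `shutdown_anysegwit`, only the standard shutdown script forms (P2PKH, P2SH,
                // P2WPKH, P2WSH) are acceptable, so this v16 witness program must be rejected at
                // channel creation below.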
9436
9437                 let seed = [42; 32];
9438                 let network = Network::Testnet;
9439                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9440                 keys_provider.expect(OnGetShutdownScriptpubkey {
9441                         returns: non_v0_segwit_shutdown_script.clone(),
9442                 });
9443
9444                 let secp_ctx = Secp256k1::new();
9445                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9446                 let config = UserConfig::default();
9447                 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
9448                         Err(APIError::IncompatibleShutdownScript { script }) => {
9449                                 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
9450                         },
9451                         Err(e) => panic!("Unexpected error: {:?}", e),
9452                         Ok(_) => panic!("Expected error"),
9453                 }
9454         }
9455
9456         // Check that, during channel creation, we use the same feerate in the open channel message
9457         // as we do in the Channel object creation itself.
9458         #[test]
9459         fn test_open_channel_msg_fee() {
9460                 let original_fee = 253;
9461                 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
9462                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
9463                 let secp_ctx = Secp256k1::new();
9464                 let seed = [42; 32];
9465                 let network = Network::Testnet;
9466                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9467
9468                 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9469                 let config = UserConfig::default();
9470                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9471
9472                 // Now change the fee so we can check that the fee in the open_channel message is the
9473                 // same as the old fee.
9474                 fee_est.fee_est = 500;
9475                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9476                 assert_eq!(open_channel_msg.common_fields.commitment_feerate_sat_per_1000_weight, original_fee);
9477         }
9478
9479         #[test]
9480         fn test_holder_vs_counterparty_dust_limit() {
9481                 // Test that when calculating the local and remote commitment transaction fees, the correct
9482                 // dust limits are used.
9483                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9484                 let secp_ctx = Secp256k1::new();
9485                 let seed = [42; 32];
9486                 let network = Network::Testnet;
9487                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9488                 let logger = test_utils::TestLogger::new();
9489                 let best_block = BestBlock::from_network(network);
9490
9491                 // Go through the flow of opening a channel between two nodes, making sure
9492                 // they have different dust limits.
9493
9494                 // Create Node A's channel pointing to Node B's pubkey
9495                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9496                 let config = UserConfig::default();
9497                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9498
9499                 // Create Node B's channel by receiving Node A's open_channel message
9500                 // Make sure A's dust limit is as we expect.
9501                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9502                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9503                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9504
9505                 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
9506                 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
9507                 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
9508                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9509                 node_a_chan.context.holder_dust_limit_satoshis = 1560;
9510
9511                 // Node A --> Node B: funding created
9512                 let output_script = node_a_chan.context.get_funding_redeemscript();
9513                 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9514                         value: 10000000, script_pubkey: output_script.clone(),
9515                 }]};
9516                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9517                 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9518                 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9519
9520                 // Node B --> Node A: funding signed
9521                 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9522                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9523
9524                 // Put some inbound and outbound HTLCs in A's channel.
9525                 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
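                // At 15000 sat/kW with the non-anchor HTLC claim weights (703 success / 663 timeout),
                // B's highest dust threshold is 546 + 15000 * 703 / 1000 = 11_091 sat, so 11_092 sat is
                // non-dust for B, while A's thresholds (1560 + 9_945 = 11_505 sat and
                // 1560 + 10_545 = 12_105 sat) both exceed it.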
9526                 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
9527                         htlc_id: 0,
9528                         amount_msat: htlc_amount_msat,
9529                         payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
9530                         cltv_expiry: 300000000,
9531                         state: InboundHTLCState::Committed,
9532                 });
9533
9534                 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
9535                         htlc_id: 1,
9536                         amount_msat: htlc_amount_msat, // put an amount below A's effective dust limit but above B's.
9537                         payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
9538                         cltv_expiry: 200000000,
9539                         state: OutboundHTLCState::Committed,
9540                         source: HTLCSource::OutboundRoute {
9541                                 path: Path { hops: Vec::new(), blinded_tail: None },
9542                                 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9543                                 first_hop_htlc_msat: 548,
9544                                 payment_id: PaymentId([42; 32]),
9545                         },
9546                         skimmed_fee_msat: None,
9547                         blinding_point: None,
9548                 });
9549
9550                 // Make sure that when Node A calculates its local commitment transaction, none of the
9551                 // HTLCs pass the dust limit check (i.e. both are counted as dust, adding no HTLC outputs).
9552                 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9553                 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9554                 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
9555                 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
9556
9557                 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
9558                 // of the HTLCs are seen to be above the dust limit.
9559                 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9560                 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
9561                 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9562                 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9563                 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
9564         }
9565
9566         #[test]
9567         fn test_timeout_vs_success_htlc_dust_limit() {
9568                 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
9569                 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
9570                 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
9571                 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
9572                 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
9573                 let secp_ctx = Secp256k1::new();
9574                 let seed = [42; 32];
9575                 let network = Network::Testnet;
9576                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9577
9578                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9579                 let config = UserConfig::default();
9580                 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9581
9582                 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
9583                 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
9584
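                // An HTLC counts as dust when its value minus the fee to claim it on chain
                // (feerate * claim-tx weight / 1000) falls below the dust limit, so the amounts below
                // are chosen one satoshi either side of `dust_limit + fee` for the relevant claim
                // transaction (success for received HTLCs, timeout for offered ones).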
9585                 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
9586                 // counted as dust when it shouldn't be.
9587                 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
9588                 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9589                 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9590                 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9591
9592                 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9593                 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
9594                 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9595                 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9596                 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9597
9598                 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9599
9600                 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9601                 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
9602                 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9603                 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9604                 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9605
9606                 // If swapped: this HTLC would be counted as dust when it shouldn't be.
9607                 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
9608                 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9609                 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9610                 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9611         }
9612
9613         #[test]
9614         fn channel_reestablish_no_updates() {
9615                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9616                 let logger = test_utils::TestLogger::new();
9617                 let secp_ctx = Secp256k1::new();
9618                 let seed = [42; 32];
9619                 let network = Network::Testnet;
9620                 let best_block = BestBlock::from_network(network);
9621                 let chain_hash = ChainHash::using_genesis_block(network);
9622                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9623
9624                 // Go through the flow of opening a channel between two nodes.
9625
9626                 // Create Node A's channel pointing to Node B's pubkey
9627                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9628                 let config = UserConfig::default();
9629                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9630
9631                 // Create Node B's channel by receiving Node A's open_channel message
9632                 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
9633                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9634                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9635
9636                 // Node B --> Node A: accept channel
9637                 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9638                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9639
9640                 // Node A --> Node B: funding created
9641                 let output_script = node_a_chan.context.get_funding_redeemscript();
9642                 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9643                         value: 10000000, script_pubkey: output_script.clone(),
9644                 }]};
9645                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9646                 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9647                 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9648
9649                 // Node B --> Node A: funding signed
9650                 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9651                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9652
9653                 // Now disconnect the two nodes and check that the commitment numbers and last
9654                 // per-commitment secret in Node B's channel_reestablish message are sane.
9655                 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9656                 let msg = node_b_chan.get_channel_reestablish(&&logger);
9657                 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9658                 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9659                 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
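                // A freshly-funded channel has only exchanged the initial commitment transaction
                // (number 0), so the next commitment number is 1, no revocations have happened yet,
                // and no per-commitment secret from the peer has been received (hence all zeroes).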
9660
9661                 // Check that the commitment numbers and last per-commitment secret in Node A's
9662                 // channel_reestablish message are sane.
9663                 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9664                 let msg = node_a_chan.get_channel_reestablish(&&logger);
9665                 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9666                 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9667                 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
9668         }
9669
9670         #[test]
9671         fn test_configured_holder_max_htlc_value_in_flight() {
9672                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9673                 let logger = test_utils::TestLogger::new();
9674                 let secp_ctx = Secp256k1::new();
9675                 let seed = [42; 32];
9676                 let network = Network::Testnet;
9677                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9678                 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9679                 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9680
9681                 let mut config_2_percent = UserConfig::default();
9682                 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
9683                 let mut config_99_percent = UserConfig::default();
9684                 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
9685                 let mut config_0_percent = UserConfig::default();
9686                 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
9687                 let mut config_101_percent = UserConfig::default();
9688                 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
9689
9690                 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
9691                 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9692                 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9693                 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
9694                 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
9695                 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
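                // e.g. a 10_000_000 sat channel is 10_000_000_000 msat, 2% of which is 200_000_000 msat.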
9696
9697                 // Test with the upper bound - 1 of valid values (99%).
9698                 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
9699                 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
9700                 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
9701
9702                 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
9703
9704                 // Test that `InboundV1Channel::new` creates a channel with the correct value for
9705                 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9706                 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9707                 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9708                 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
9709                 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
9710
9711                 // Test with the upper bound - 1 of valid values (99%).
9712                 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9713                 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
9714                 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
9715
9716                 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9717                 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9718                 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
9719                 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
9720                 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
9721
9722                 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
9723                 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
9724                 // than 100.
9725                 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
9726                 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
9727                 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
9728
9729                 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9730                 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9731                 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9732                 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
9733                 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
9734
9735                 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
9736                 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
9737                 // than 100.
9738                 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9739                 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
9740                 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
9741         }
9742
9743         #[test]
9744         fn test_configured_holder_selected_channel_reserve_satoshis() {
9745
9746                 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
9747                 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
9748                 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
9749
9750                 // Test with valid but unreasonably high channel reserves
9751                 // The requesting and accepting parties ask for 49%-49% and 60%-30% channel reserves
9752                 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
9753                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
9754
9755                 // Test with a calculated channel reserve below the lower bound,
9756                 // i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
9757                 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
9758
9759                 // Test with invalid channel reserves, where the sum of the two reserves is greater
9760                 // than or equal to the channel value
9761                 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
9762                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
9763         }
9764
9765         fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
9766                 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
9767                 let logger = test_utils::TestLogger::new();
9768                 let secp_ctx = Secp256k1::new();
9769                 let seed = [42; 32];
9770                 let network = Network::Testnet;
9771                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9772                 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9773                 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9774
9776                 let mut outbound_node_config = UserConfig::default();
9777                 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
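                // The fractional reserve is expressed in parts-per-million, e.g. 0.02 becomes 20_000.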
9778                 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
9779
9780                 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
9781                 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
9782
9783                 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
9784                 let mut inbound_node_config = UserConfig::default();
9785                 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
9786
9787                 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
9788                         let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
9789
9790                         let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
9791
9792                         assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
9793                         assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
9794                 } else {
9795                         // Channel negotiation should fail
9796                         let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
9797                         assert!(result.is_err());
9798                 }
9799         }
9800
9801         #[test]
9802         fn channel_update() {
9803                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9804                 let logger = test_utils::TestLogger::new();
9805                 let secp_ctx = Secp256k1::new();
9806                 let seed = [42; 32];
9807                 let network = Network::Testnet;
9808                 let best_block = BestBlock::from_network(network);
9809                 let chain_hash = ChainHash::using_genesis_block(network);
9810                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9811
9812                 // Create Node A's channel pointing to Node B's pubkey
9813                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9814                 let config = UserConfig::default();
9815                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9816
9817                 // Create Node B's channel by receiving Node A's open_channel message
9818                 // Make sure A's dust limit is as we expect.
9819                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9820                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9821                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9822
9823                 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
9824                 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
9825                 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
9826                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9827                 node_a_chan.context.holder_dust_limit_satoshis = 1560;
9828
9829                 // Node A --> Node B: funding created
9830                 let output_script = node_a_chan.context.get_funding_redeemscript();
9831                 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9832                         value: 10000000, script_pubkey: output_script.clone(),
9833                 }]};
9834                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9835                 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9836                 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9837
9838                 // Node B --> Node A: funding signed
9839                 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9840                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9841
9842                 // Make sure that receiving a channel update will update the Channel as expected.
9843                 let update = ChannelUpdate {
9844                         contents: UnsignedChannelUpdate {
9845                                 chain_hash,
9846                                 short_channel_id: 0,
9847                                 timestamp: 0,
9848                                 flags: 0,
9849                                 cltv_expiry_delta: 100,
9850                                 htlc_minimum_msat: 5,
9851                                 htlc_maximum_msat: MAX_VALUE_MSAT,
9852                                 fee_base_msat: 110,
9853                                 fee_proportional_millionths: 11,
9854                                 excess_data: Vec::new(),
9855                         },
9856                         signature: Signature::from(unsafe { FFISignature::new() })
9857                 };
9858                 assert!(node_a_chan.channel_update(&update).unwrap());
9859
9860                 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
9861                 // change our official htlc_minimum_msat.
9862                 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
9863                 match node_a_chan.context.counterparty_forwarding_info() {
9864                         Some(info) => {
9865                                 assert_eq!(info.cltv_expiry_delta, 100);
9866                                 assert_eq!(info.fee_base_msat, 110);
9867                                 assert_eq!(info.fee_proportional_millionths, 11);
9868                         },
9869                         None => panic!("expected counterparty forwarding info to be Some")
9870                 }
9871
9872                 assert!(!node_a_chan.channel_update(&update).unwrap());
9873         }
9874
9875         #[test]
9876         fn blinding_point_skimmed_fee_malformed_ser() {
9877                 // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
9878                 // properly.
9879                 let logger = test_utils::TestLogger::new();
9880                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9881                 let secp_ctx = Secp256k1::new();
9882                 let seed = [42; 32];
9883                 let network = Network::Testnet;
9884                 let best_block = BestBlock::from_network(network);
9885                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9886
9887                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9888                 let config = UserConfig::default();
9889                 let features = channelmanager::provided_init_features(&config);
9890                 let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9891                         &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
9892                 ).unwrap();
9893                 let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
9894                         &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9895                         &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
9896                 ).unwrap();
9897                 outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
9898                 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9899                         value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
9900                 }]};
9901                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9902                 let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
9903                 let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
9904                         Ok((chan, _, _)) => chan,
9905                         Err((_, e)) => panic!("{}", e),
9906                 };
9907
9908                 let dummy_htlc_source = HTLCSource::OutboundRoute {
9909                         path: Path {
9910                                 hops: vec![RouteHop {
9911                                         pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
9912                                         node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
9913                                         cltv_expiry_delta: 0, maybe_announced_channel: false,
9914                                 }],
9915                                 blinded_tail: None
9916                         },
9917                         session_priv: test_utils::privkey(42),
9918                         first_hop_htlc_msat: 0,
9919                         payment_id: PaymentId([42; 32]),
9920                 };
9921                 let dummy_outbound_output = OutboundHTLCOutput {
9922                         htlc_id: 0,
9923                         amount_msat: 0,
9924                         payment_hash: PaymentHash([43; 32]),
9925                         cltv_expiry: 0,
9926                         state: OutboundHTLCState::Committed,
9927                         source: dummy_htlc_source.clone(),
9928                         skimmed_fee_msat: None,
9929                         blinding_point: None,
9930                 };
9931                 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
9932                 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
9933                         if idx % 2 == 0 {
9934                                 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
9935                         }
9936                         if idx % 3 == 0 {
9937                                 htlc.skimmed_fee_msat = Some(1);
9938                         }
9939                 }
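                      // With the idx % 2 and idx % 3 triggers over ten HTLCs, the list covers all
                      // four combinations: blinding point only, skimmed fee only, both, and neither.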
9940                 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
9941
9942                 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
9943                         amount_msat: 0,
9944                         cltv_expiry: 0,
9945                         payment_hash: PaymentHash([43; 32]),
9946                         source: dummy_htlc_source.clone(),
9947                         onion_routing_packet: msgs::OnionPacket {
9948                                 version: 0,
9949                                 public_key: Ok(test_utils::pubkey(1)),
9950                                 hop_data: [0; 20*65],
9951                                 hmac: [0; 32]
9952                         },
9953                         skimmed_fee_msat: None,
9954                         blinding_point: None,
9955                 };
9956                 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
9957                         payment_preimage: PaymentPreimage([42; 32]),
9958                         htlc_id: 0,
9959                 };
9960                 let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
9961                         htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
9962                 };
9963                 let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
9964                         htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
9965                 };
9966                 let mut holding_cell_htlc_updates = Vec::with_capacity(12);
9967                 for i in 0..12 {
9968                         if i % 5 == 0 {
9969                                 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
9970                         } else if i % 5 == 1 {
9971                                 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
9972                         } else if i % 5 == 2 {
9973                                 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
9974                                 if let HTLCUpdateAwaitingACK::AddHTLC {
9975                                         ref mut blinding_point, ref mut skimmed_fee_msat, ..
9976                                 } = &mut dummy_add {
9977                                         *blinding_point = Some(test_utils::pubkey(42 + i));
9978                                         *skimmed_fee_msat = Some(42);
9979                                 } else { panic!() }
9980                                 holding_cell_htlc_updates.push(dummy_add);
9981                         } else if i % 5 == 3 {
9982                                 holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
9983                         } else {
9984                                 holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
9985                         }
9986                 }
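                      // Twelve iterations over i % 5 hit every HTLCUpdateAwaitingACK variant built
                      // above: plain adds, claims, adds carrying a blinding point and skimmed fee,
                      // malformed-HTLC failures, and ordinary failures.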
9987                 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
9988
9989                 // Encode and decode the channel and ensure that the HTLCs within are the same.
9990                 let encoded_chan = chan.encode();
9991                 let mut s = crate::io::Cursor::new(&encoded_chan);
9992                 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
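                      // Channel::read's argument tuple is (entropy source, signer provider,
                      // best-block height, default channel type features); the 0 below presumably
                      // stands in for the height, which is irrelevant to this round-trip check.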
9993                 let features = channelmanager::provided_channel_type_features(&config);
9994                 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
9995                 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
9996                 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
9997         }
9998
9999         #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
10000         #[test]
10001         fn outbound_commitment_test() {
10002                 use bitcoin::sighash;
10003                 use bitcoin::consensus::encode::serialize;
10004                 use bitcoin::sighash::EcdsaSighashType;
10005                 use bitcoin::hashes::hex::FromHex;
10006                 use bitcoin::hash_types::Txid;
10007                 use bitcoin::secp256k1::Message;
10008                 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
10009                 use crate::ln::PaymentPreimage;
10010                 use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
10011                 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
10012                 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
10013                 use crate::util::logger::Logger;
10014                 use crate::sync::Arc;
10015                 use core::str::FromStr;
10016                 use hex::DisplayHex;
10017
10018                 // Test vectors from BOLT 3 Appendices C and F (anchors):
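                      // The vectors pin exact signatures and serialized transactions, so any drift
                      // in commitment construction fails these assertions loudly.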
10019                 let feeest = TestFeeEstimator{fee_est: 15000};
10020                 let logger: Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
10021                 let secp_ctx = Secp256k1::new();
10022
10023                 let mut signer = InMemorySigner::new(
10024                         &secp_ctx,
10025                         SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
10026                         SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
10027                         SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
10028                         SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
10029                         SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
10030
10031                         // These aren't set in the test vectors:
10032                         [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
10033                         10_000_000,
10034                         [0; 32],
10035                         [0; 32],
10036                 );
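                      // Per InMemorySigner::new's parameter order, the five secrets above are the
                      // funding, revocation-base, payment, delayed-payment-base, and HTLC-base keys
                      // from the BOLT 3 vectors; the remaining arguments are the commitment seed,
                      // channel value (sats), channel_keys_id, and a unique RNG starting value.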
10037
10038                 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
10039                                 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
10040                 let keys_provider = Keys { signer: signer.clone() };
10041
10042                 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
10043                 let mut config = UserConfig::default();
10044                 config.channel_handshake_config.announced_channel = false;
10045                 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
10046                 chan.context.holder_dust_limit_satoshis = 546;
10047                 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in when accept_channel is processed
10048
10049                 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
10050
10051                 let counterparty_pubkeys = ChannelPublicKeys {
10052                         funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
10053                         revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
10054                         payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
10055                         delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
10056                         htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
10057                 };
10058                 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
10059                         CounterpartyChannelTransactionParameters {
10060                                 pubkeys: counterparty_pubkeys.clone(),
10061                                 selected_contest_delay: 144
10062                         });
10063                 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
10064                 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
10065
10066                 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
10067                            <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
10068
10069                 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
10070                            <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
10071
10072                 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
10073                            <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
10074
10075                 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
10076                 // derived from a commitment_seed, so instead we copy it here and call
10077                 // build_commitment_transaction.
10078                 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
10079                 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
10080                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
10081                 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
10082                 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
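                      // derive_new combines the holder's per-commitment point with the holder's
                      // delayed-payment and HTLC basepoints and the counterparty's revocation and
                      // HTLC basepoints to produce this commitment's transaction keys.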
10083
10084                 macro_rules! test_commitment {
10085                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
10086                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
10087                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
10088                         };
10089                 }
10090
10091                 macro_rules! test_commitment_with_anchors {
10092                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
10093                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10094                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
10095                         };
10096                 }
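                      // Both wrappers only select the channel type features (static remote key vs.
                      // zero-fee-HTLC anchors) before delegating to test_commitment_common below.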
10097
10098                 macro_rules! test_commitment_common {
10099                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
10100                                 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
10101                         } ) => { {
10102                                 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
10103                                         let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
10104
10105                                         let htlcs = commitment_stats.htlcs_included.drain(..)
10106                                                 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
10107                                                 .collect();
10108                                         (commitment_stats.tx, htlcs)
10109                                 };
10110                                 let trusted_tx = commitment_tx.trust();
10111                                 let unsigned_tx = trusted_tx.built_transaction();
10112                                 let redeemscript = chan.context.get_funding_redeemscript();
10113                                 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
10114                                 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
10115                                 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
10116                                 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
10117
10118                                 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
10119                                 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
10120                                 let mut counterparty_htlc_sigs = Vec::new();
10121                                 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
10122                                 $({
10123                                         let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
10124                                         per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
10125                                         counterparty_htlc_sigs.push(remote_signature);
10126                                 })*
10127                                 assert_eq!(htlcs.len(), per_htlc.len());
10128
10129                                 let holder_commitment_tx = HolderCommitmentTransaction::new(
10130                                         commitment_tx.clone(),
10131                                         counterparty_signature,
10132                                         counterparty_htlc_sigs,
10133                                         &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
10134                                         chan.context.counterparty_funding_pubkey()
10135                                 );
10136                                 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
10137                                 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
10138
10139                                 let funding_redeemscript = chan.context.get_funding_redeemscript();
10140                                 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
10141                                 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
10142
10143                                 // Walk the counterparty HTLC signatures in step with the vector entries:
10143                                 // ((htlc, counterparty_sig), (index, holder_sig))
10144                                 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
10145
10146                                 $({
10147                                         log_trace!(logger, "verifying htlc {}", $htlc_idx);
10148                                         let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
10149
10150                                         let htlc = &htlcs[$htlc_idx];
10151                                         let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
10152                                                 chan.context.get_counterparty_selected_contest_delay().unwrap(),
10153                                                 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
10154                                         let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
10155                                         let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
10156                                         let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
10157                                         assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
10158
10159                                         let mut preimage: Option<PaymentPreimage> = None;
10160                                         if !htlc.offered {
10161                                                 for i in 0..5 {
10162                                                         let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
10163                                                         if out == htlc.payment_hash {
10164                                                                 preimage = Some(PaymentPreimage([i; 32]));
10165                                                         }
10166                                                 }
10167
10168                                                 assert!(preimage.is_some());
10169                                         }
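                                              // Received (non-offered) HTLCs need the preimage to build the
                                              // success witness; the vectors use preimages [i; 32] for i in
                                              // 0..5, so the matching one is recovered by the search above.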
10170
10171                                         let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
10172                                         let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
10173                                                 channel_derivation_parameters: ChannelDerivationParameters {
10174                                                         value_satoshis: chan.context.channel_value_satoshis,
10175                                                         keys_id: chan.context.channel_keys_id,
10176                                                         transaction_parameters: chan.context.channel_transaction_parameters.clone(),
10177                                                 },
10178                                                 commitment_txid: trusted_tx.txid(),
10179                                                 per_commitment_number: trusted_tx.commitment_number(),
10180                                                 per_commitment_point: trusted_tx.per_commitment_point(),
10181                                                 feerate_per_kw: trusted_tx.feerate_per_kw(),
10182                                                 htlc: htlc.clone(),
10183                                                 preimage: preimage.clone(),
10184                                                 counterparty_sig: *htlc_counterparty_sig,
10185                                         }, &secp_ctx).unwrap();
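                              // With zero-fee-HTLC anchors, the two 330-sat anchor outputs sort ahead of
                              // the HTLCs in these vectors, shifting each HTLC's output index by two.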
10186                                         let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
10187                                         assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
10188
10189                                         let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
10190                                         assert_eq!(signature, htlc_holder_sig, "htlc sig");
10191                                         let trusted_tx = holder_commitment_tx.trust();
10192                                         htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
10193                                         log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
10194                                         assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
10195                                 })*
10196                                 assert!(htlc_counterparty_sig_iter.next().is_none());
10197                         } }
10198                 }
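                      // For each vector, the macro verifies the counterparty's commitment signature,
                      // checks the holder's signature and fully-signed transaction against the
                      // expected hex, and then validates every HTLC's signatures and HTLC-claim
                      // transaction in output order.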
10199
10200                 // anchors: simple commitment tx with no HTLCs and single anchor
10201                 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
10202                                                  "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
10203                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10204
10205                 // simple commitment tx with no HTLCs
10206                 chan.context.value_to_self_msat = 7000000000;
10207
10208                 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
10209                                                  "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
10210                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10211
10212                 // anchors: simple commitment tx with no HTLCs
10213                 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
10214                                                  "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
10215                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10216
10217                 chan.context.pending_inbound_htlcs.push({
10218                         let mut out = InboundHTLCOutput{
10219                                 htlc_id: 0,
10220                                 amount_msat: 1000000,
10221                                 cltv_expiry: 500,
10222                                 payment_hash: PaymentHash([0; 32]),
10223                                 state: InboundHTLCState::Committed,
10224                         };
10225                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
10226                         out
10227                 });
10228                 chan.context.pending_inbound_htlcs.push({
10229                         let mut out = InboundHTLCOutput{
10230                                 htlc_id: 1,
10231                                 amount_msat: 2000000,
10232                                 cltv_expiry: 501,
10233                                 payment_hash: PaymentHash([0; 32]),
10234                                 state: InboundHTLCState::Committed,
10235                         };
10236                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
10237                         out
10238                 });
10239                 chan.context.pending_outbound_htlcs.push({
10240                         let mut out = OutboundHTLCOutput{
10241                                 htlc_id: 2,
10242                                 amount_msat: 2000000,
10243                                 cltv_expiry: 502,
10244                                 payment_hash: PaymentHash([0; 32]),
10245                                 state: OutboundHTLCState::Committed,
10246                                 source: HTLCSource::dummy(),
10247                                 skimmed_fee_msat: None,
10248                                 blinding_point: None,
10249                         };
10250                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
10251                         out
10252                 });
10253                 chan.context.pending_outbound_htlcs.push({
10254                         let mut out = OutboundHTLCOutput{
10255                                 htlc_id: 3,
10256                                 amount_msat: 3000000,
10257                                 cltv_expiry: 503,
10258                                 payment_hash: PaymentHash([0; 32]),
10259                                 state: OutboundHTLCState::Committed,
10260                                 source: HTLCSource::dummy(),
10261                                 skimmed_fee_msat: None,
10262                                 blinding_point: None,
10263                         };
10264                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
10265                         out
10266                 });
10267                 chan.context.pending_inbound_htlcs.push({
10268                         let mut out = InboundHTLCOutput{
10269                                 htlc_id: 4,
10270                                 amount_msat: 4000000,
10271                                 cltv_expiry: 504,
10272                                 payment_hash: PaymentHash([0; 32]),
10273                                 state: InboundHTLCState::Committed,
10274                         };
10275                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
10276                         out
10277                 });
10278
10279                 // commitment tx with all five HTLCs untrimmed (minimum feerate)
10280                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10281                 chan.context.feerate_per_kw = 0;
10282
10283                 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
10284                                  "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
10285                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10286
10287                                   { 0,
10288                                   "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
10289                                   "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
10290                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
10291
10292                                   { 1,
10293                                   "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
10294                                   "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
10295                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10296
10297                                   { 2,
10298                                   "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
10299                                   "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
10300                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10301
10302                                   { 3,
10303                                   "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
10304                                   "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
10305                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10306
10307                                   { 4,
10308                                   "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
10309                                   "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
10310                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10311                 } );
10312
10313                 // commitment tx with seven outputs untrimmed (maximum feerate)
10314                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10315                 chan.context.feerate_per_kw = 647;
10316
10317                 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
10318                                  "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
10319                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10320
10321                                   { 0,
10322                                   "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
10323                                   "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
10324                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
10325
10326                                   { 1,
10327                                   "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
10328                                   "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
10329                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10330
10331                                   { 2,
10332                                   "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
10333                                   "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
10334                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10335
10336                                   { 3,
10337                                   "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
10338                                   "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
10339                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10340
10341                                   { 4,
10342                                   "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
10343                                   "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
10344                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10345                 } );
10346
10347                 // commitment tx with six outputs untrimmed (minimum feerate)
10348                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10349                 chan.context.feerate_per_kw = 648;
10350
10351                 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
10352                                  "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
10353                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10354
10355                                   { 0,
10356                                   "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
10357                                   "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
10358                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10359
10360                                   { 1,
10361                                   "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
10362                                   "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
10363                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10364
10365                                   { 2,
10366                                   "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
10367                                   "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
10368                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10369
10370                                   { 3,
10371                                   "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
10372                                   "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
10373                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10374                 } );
10375
10376                 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
10377                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10378                 chan.context.feerate_per_kw = 645;
10379                 chan.context.holder_dust_limit_satoshis = 1001;
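                // With `anchors_zero_fee_htlc_tx`, second-stage HTLC transactions
                // pay no fee (they are CPFP-bumped via the anchor outputs), so an
                // HTLC is trimmed iff its amount is below the dust limit itself.
                // The escalating dust limits across the anchors cases (1001, 2001,
                // 3001, 4001 sats) trim the 1000, 2000, 3000 and 4000 sat HTLCs in
                // turn; here 1001 drops only the 1000 sat received HTLC.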
10380
10381                 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
10382                                  "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
10383                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10384
10385                                   { 0,
10386                                   "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
10387                                   "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
10388                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
10389
10390                                   { 1,
10391                                   "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
10392                                   "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
10393                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10394
10395                                   { 2,
10396                                   "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
10397                                   "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
10398                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
10399
10400                                   { 3,
10401                                   "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
10402                                   "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
10403                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10404                 } );
10405
10406                 // commitment tx with six outputs untrimmed (maximum feerate)
10407                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10408                 chan.context.feerate_per_kw = 2069;
10409                 chan.context.holder_dust_limit_satoshis = 546;
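                // Boundary check: a received HTLC is trimmed once its amount is
                // below dust_limit + feerate * 703 / 1000. At 2069 sat/kW the
                // 2000 sat received HTLC sits exactly at the threshold,
                // 546 + 2069 * 703 / 1000 = 2000, so it is still present; the next
                // case bumps the feerate to 2070 (threshold 2001) and drops it.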
10410
10411                 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
10412                                  "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
10413                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10414
10415                                   { 0,
10416                                   "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
10417                                   "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
10418                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10419
10420                                   { 1,
10421                                   "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
10422                                   "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
10423                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10424
10425                                   { 2,
10426                                   "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
10427                                   "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
10428                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10429
10430                                   { 3,
10431                                   "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
10432                                   "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
10433                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10434                 } );
10435
10436                 // commitment tx with five outputs untrimmed (minimum feerate)
10437                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10438                 chan.context.feerate_per_kw = 2070;
10439
10440                 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
10441                                  "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
10442                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10443
10444                                   { 0,
10445                                   "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
10446                                   "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
10447                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10448
10449                                   { 1,
10450                                   "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
10451                                   "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
10452                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10453
10454                                   { 2,
10455                                   "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
10456                                   "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
10457                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10458                 } );
10459
10460                 // commitment tx with five outputs untrimmed (maximum feerate)
10461                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10462                 chan.context.feerate_per_kw = 2194;
10463
10464                 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
10465                                  "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
10466                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10467
10468                                   { 0,
10469                                   "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
10470                                   "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
10471                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
10472
10473                                   { 1,
10474                                   "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
10475                                   "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
10476                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10477
10478                                   { 2,
10479                                   "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
10480                                   "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
10481                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10482                 } );
10483
10484                 // commitment tx with four outputs untrimmed (minimum feerate)
10485                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10486                 chan.context.feerate_per_kw = 2195;
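                // The offered-HTLC variant of the trimming rule uses the
                // HTLC-timeout weight: amount < dust_limit + feerate * 663 / 1000.
                // At 2195 sat/kW the threshold is 546 + 2195 * 663 / 1000 = 2001,
                // so the 2000 sat offered HTLC is trimmed here; it survived the
                // previous case at 2194, where the threshold is exactly 2000.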
10487
10488                 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
10489                                  "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
10490                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10491
10492                                   { 0,
10493                                   "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
10494                                   "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
10495                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10496
10497                                   { 1,
10498                                   "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
10499                                   "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
10500                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10501                 } );
10502
10503                 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
10504                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10505                 chan.context.feerate_per_kw = 2185;
10506                 chan.context.holder_dust_limit_satoshis = 2001;
10507                 let cached_channel_type = chan.context.channel_type.clone();
10508                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
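                // The non-anchor channel type is stashed so later cases can switch
                // back. With zero-fee HTLC claims, the 2001 sat dust limit now also
                // trims both 2000 sat HTLCs (received and offered), leaving only
                // the 3000 and 4000 sat ones.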
10509
10510                 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
10511                                  "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
10512                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10513
10514                                   { 0,
10515                                   "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
10516                                   "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
10517                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
10518
10519                                   { 1,
10520                                   "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
10521                                   "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
10522                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10523                 } );
10524
10525                 // commitment tx with four outputs untrimmed (maximum feerate)
10526                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10527                 chan.context.feerate_per_kw = 3702;
10528                 chan.context.holder_dust_limit_satoshis = 546;
10529                 chan.context.channel_type = cached_channel_type.clone();
10530
10531                 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
10532                                  "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
10533                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10534
10535                                   { 0,
10536                                   "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
10537                                   "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
10538                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10539
10540                                   { 1,
10541                                   "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
10542                                   "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
10543                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10544                 } );
10545
10546                 // commitment tx with three outputs untrimmed (minimum feerate)
10547                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10548                 chan.context.feerate_per_kw = 3703;
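                // Same offered-HTLC rule one notch further: the threshold at
                // 3703 sat/kW is 546 + 3703 * 663 / 1000 = 3001, trimming the
                // 3000 sat offered HTLC as well.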
10549
10550                 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
10551                                  "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
10552                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10553
10554                                   { 0,
10555                                   "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
10556                                   "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
10557                                   "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10558                 } );
10559
10560                 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
10561                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10562                 chan.context.feerate_per_kw = 3687;
10563                 chan.context.holder_dust_limit_satoshis = 3001;
10564                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10565
10566                 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
10567                                  "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
10568                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10569
10570                                   { 0,
10571                                   "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
10572                                   "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
10573                                   "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10574                 } );
10575
10576                 // commitment tx with three outputs untrimmed (maximum feerate)
10577                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10578                 chan.context.feerate_per_kw = 4914;
10579                 chan.context.holder_dust_limit_satoshis = 546;
10580                 chan.context.channel_type = cached_channel_type.clone();
10581
10582                 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
10583                                  "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
10584                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10585
10586                                   { 0,
10587                                   "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
10588                                   "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
10589                                   "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10590                 } );
10591
10592                 // commitment tx with two outputs untrimmed (minimum feerate)
10593                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10594                 chan.context.feerate_per_kw = 4915;
10595                 chan.context.holder_dust_limit_satoshis = 546;
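                // 546 + 4915 * 703 / 1000 = 4001 trims the last remaining HTLC
                // (the 4000 sat received one), leaving only to_local and
                // to_remote; hence the empty HTLC-claim list passed to the macro
                // below.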
10596
10597                 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
10598                                  "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
10599                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10600
10601                 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
10602                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10603                 chan.context.feerate_per_kw = 4894;
10604                 chan.context.holder_dust_limit_satoshis = 4001;
10605                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10606
10607                 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
10608                                  "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
10609                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10610
10611                 // commitment tx with two outputs untrimmed (maximum feerate)
10612                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10613                 chan.context.feerate_per_kw = 9651180;
10614                 chan.context.holder_dust_limit_satoshis = 546;
10615                 chan.context.channel_type = cached_channel_type.clone();
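                // Worked numbers, assuming BOLT 3's 724-weight commitment with no
                // HTLC outputs: the funder's balance net of the 5000 sats locked
                // in the (now trimmed) offered HTLCs is 6_988_000 sats. At
                // 9_651_180 sat/kW the fee is 9_651_180 * 724 / 1000 = 6_987_454
                // sats, leaving to_local exactly at the 546 sat dust limit; the
                // next case, at 9_651_181, rounds the fee up to 6_987_455 sats,
                // so to_local falls to 545 sats and is trimmed, leaving a single
                // output.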
10616
10617                 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
10618                                  "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
10619                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10620
10621                 // commitment tx with one output untrimmed (minimum feerate)
10622                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10623                 chan.context.feerate_per_kw = 9651181;
10624
10625                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10626                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10627                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10628
10629                 // anchors: commitment tx with one output untrimmed (minimum dust limit)
10630                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10631                 chan.context.feerate_per_kw = 6216010;
10632                 chan.context.holder_dust_limit_satoshis = 4001;
10633                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10634
10635                 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
10636                                  "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
10637                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10638
10639                 // commitment tx with fee greater than funder amount
10640                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10641                 chan.context.feerate_per_kw = 9651936;
10642                 chan.context.holder_dust_limit_satoshis = 546;
10643                 chan.context.channel_type = cached_channel_type;
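                // At 9_651_936 sat/kW the nominal fee, 9_651_936 * 724 / 1000 =
                // 6_988_001 sats, exceeds the funder's 6_988_000 sat balance by
                // one sat. The fee is capped at what the funder can afford, so the
                // expected transaction and signatures match the one-output case
                // above.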
10644
10645                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10646                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10647                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10648
10649                 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
10650                 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
10651                 chan.context.feerate_per_kw = 253;
10652                 chan.context.pending_inbound_htlcs.clear();
10653                 chan.context.pending_inbound_htlcs.push({
10654                         let mut out = InboundHTLCOutput{
10655                                 htlc_id: 1,
10656                                 amount_msat: 2000000,
10657                                 cltv_expiry: 501,
10658                                 payment_hash: PaymentHash([0; 32]),
10659                                 state: InboundHTLCState::Committed,
10660                         };
10661                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
10662                         out
10663                 });
10664                 chan.context.pending_outbound_htlcs.clear();
10665                 chan.context.pending_outbound_htlcs.push({
10666                         let mut out = OutboundHTLCOutput{
10667                                 htlc_id: 6,
10668                                 amount_msat: 5000001,
10669                                 cltv_expiry: 506,
10670                                 payment_hash: PaymentHash([0; 32]),
10671                                 state: OutboundHTLCState::Committed,
10672                                 source: HTLCSource::dummy(),
10673                                 skimmed_fee_msat: None,
10674                                 blinding_point: None,
10675                         };
10676                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
10677                         out
10678                 });
10679                 chan.context.pending_outbound_htlcs.push({
10680                         let mut out = OutboundHTLCOutput{
10681                                 htlc_id: 5,
10682                                 amount_msat: 5000000,
10683                                 cltv_expiry: 505,
10684                                 payment_hash: PaymentHash([0; 32]),
10685                                 state: OutboundHTLCState::Committed,
10686                                 source: HTLCSource::dummy(),
10687                                 skimmed_fee_msat: None,
10688                                 blinding_point: None,
10689                         };
10690                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
10691                         out
10692                 });
10693
10694                 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
10695                                  "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
10696                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10697
10698                                   { 0,
10699                                   "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
10700                                   "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
10701                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10702                                   { 1,
10703                                   "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
10704                                   "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
10705                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
10706                                   { 2,
10707                                   "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
10708                                   "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
10709                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
10710                 } );
10711
10712                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10713                 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
10714                                  "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
10715                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10716
10717                                   { 0,
10718                                   "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
10719                                   "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
10720                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10721                                   { 1,
10722                                   "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
10723                                   "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
10724                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
10725                                   { 2,
10726                                   "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
10727                                   "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
10728                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
10729                 } );
10730         }
10731
10732         #[test]
10733         fn test_per_commitment_secret_gen() {
10734                 // Test vectors from BOLT 3 Appendix D:
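		// `build_commitment_secret` follows BOLT 3's generate_from_seed: walk the 48-bit
		// index from bit 47 down to bit 0 and, for each bit set in the index, flip the
		// corresponding bit of the running 32-byte value and re-hash it with SHA256. A
		// rough sketch of the spec'd scheme (not necessarily the exact `chan_utils` code):
		//
		//     let mut res = seed;
		//     for bitpos in (0..48).rev() {
		//         if idx & (1 << bitpos) != 0 {
		//             res[bitpos / 8] ^= 1 << (bitpos & 7);
		//             res = Sha256::hash(&res).to_byte_array();
		//         }
		//     }
		//
		// This construction lets the counterparty store every received per-commitment
		// secret in O(log n) space.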
10735
10736                 let mut seed = [0; 32];
10737                 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
10738                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10739                            <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
10740
10741                 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
10742                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10743                            <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
10744
10745                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
10746                            <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
10747
10748                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
10749                            <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
10750
10751                 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
10752                 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
10753                            <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
10754         }
10755
10756         #[test]
10757         fn test_key_derivation() {
10758                 // Test vectors from BOLT 3 Appendix E:
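		// BOLT 3 derives per-commitment keys by tweaking a basepoint:
		//     pubkey  = basepoint + SHA256(per_commitment_point || basepoint) * G
		//     privkey = basepoint_secret + SHA256(per_commitment_point || basepoint)
		// while the revocation key mixes material from both parties:
		//     revocationpubkey = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
		//                      + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)
		// The assertions below check each of these derivations against the spec vectors.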
10759                 let secp_ctx = Secp256k1::new();
10760
10761                 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
10762                 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
10763
10764                 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
10765                 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
10766
10767                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
10768                 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
10769
10770                 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
10771                                 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
10772
10773                 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
10774                                 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
10775
10776                 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
10777                                 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
10778         }
10779
10780         #[test]
10781         fn test_zero_conf_channel_type_support() {
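		// Tests that we accept an inbound `open_channel` whose `channel_type` requests
		// `option_zeroconf` (alongside `static_remote_key`).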
10782                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10783                 let secp_ctx = Secp256k1::new();
10784                 let seed = [42; 32];
10785                 let network = Network::Testnet;
10786                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
10787                 let logger = test_utils::TestLogger::new();
10788
10789                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
10790                 let config = UserConfig::default();
10791                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10792                         node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
10793
10794                 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
10795                 channel_type_features.set_zero_conf_required();
10796
10797                 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
10798                 open_channel_msg.common_fields.channel_type = Some(channel_type_features);
10799                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
10800                 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10801                         node_b_node_id, &channelmanager::provided_channel_type_features(&config),
10802                         &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
10803                 assert!(res.is_ok());
10804         }
10805
10806         #[test]
10807         fn test_supports_anchors_zero_htlc_tx_fee() {
10808                 // Tests that when both sides support and negotiate `anchors_zero_fee_htlc_tx`, it
10809                 // becomes the resulting `channel_type`.
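		// Under `anchors_zero_fee_htlc_tx`, second-stage HTLC transactions commit to zero
		// fee; fees are instead added at broadcast time (via the commitment transaction's
		// anchor outputs and extra inputs on HTLC transactions), so the type is only used
		// when both peers have opted in.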
10810                 let secp_ctx = Secp256k1::new();
10811                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10812                 let network = Network::Testnet;
10813                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10814                 let logger = test_utils::TestLogger::new();
10815
10816                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10817                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10818
10819                 let mut config = UserConfig::default();
10820                 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
10821
10822                 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
10823                 // need to signal it.
10824                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10825                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10826                         &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
10827                         &config, 0, 42, None
10828                 ).unwrap();
10829                 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
10830
10831                 let mut expected_channel_type = ChannelTypeFeatures::empty();
10832                 expected_channel_type.set_static_remote_key_required();
10833                 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
10834
10835                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10836                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10837                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
10838                         None
10839                 ).unwrap();
10840
10841                 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10842                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10843                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10844                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
10845                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10846                 ).unwrap();
10847
10848                 assert_eq!(channel_a.context.channel_type, expected_channel_type);
10849                 assert_eq!(channel_b.context.channel_type, expected_channel_type);
10850         }
10851
10852         #[test]
10853         fn test_rejects_implicit_simple_anchors() {
10854                 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
10855                 // each side's `InitFeatures`, it is rejected.
10856                 let secp_ctx = Secp256k1::new();
10857                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10858                 let network = Network::Testnet;
10859                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10860                 let logger = test_utils::TestLogger::new();
10861
10862                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10863                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10864
10865                 let config = UserConfig::default();
10866
10867                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
10868                 let static_remote_key_required: u64 = 1 << 12;
10869                 let simple_anchors_required: u64 = 1 << 20;
10870                 let raw_init_features = static_remote_key_required | simple_anchors_required;
10871                 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
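		// Even feature bits are "required" per BOLT 9: bit 12 is `option_static_remotekey`
		// and bit 20 is the original `option_anchors`. `from_le_bytes` consumes the flag
		// word least-significant byte first, matching how LDK stores feature flags.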
10872
10873                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10874                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10875                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
10876                         None
10877                 ).unwrap();
10878
10879                 // Set `channel_type` to `None` to force the implicit feature negotiation.
10880                 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10881                 open_channel_msg.common_fields.channel_type = None;
10882
10883                 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
10884                 // `static_remote_key`, it will fail the channel.
10885                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10886                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10887                         &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
10888                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10889                 );
10890                 assert!(channel_b.is_err());
10891         }
10892
10893         #[test]
10894         fn test_rejects_simple_anchors_channel_type() {
10895                 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
10896                 // it is rejected.
10897                 let secp_ctx = Secp256k1::new();
10898                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10899                 let network = Network::Testnet;
10900                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10901                 let logger = test_utils::TestLogger::new();
10902
10903                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10904                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10905
10906                 let config = UserConfig::default();
10907
10908                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
10909                 let static_remote_key_required: u64 = 1 << 12;
10910                 let simple_anchors_required: u64 = 1 << 20;
10911                 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
10912                 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
10913                 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
10914                 assert!(!simple_anchors_init.requires_unknown_bits());
10915                 assert!(!simple_anchors_channel_type.requires_unknown_bits());
10916
10917                 // First, we'll try to open a channel between A and B where A requests a channel type for
10918                 // the original `option_anchors` feature (non-zero-fee HTLC transactions). This should be
10919                 // rejected by B as it's not supported by LDK.
10920                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10921                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10922                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
10923                         None
10924                 ).unwrap();
10925
10926                 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10927                 open_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
10928
10929                 let res = InboundV1Channel::<&TestKeysInterface>::new(
10930                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10931                         &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
10932                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10933                 );
10934                 assert!(res.is_err());
10935
10936                 // Then, we'll try to open another channel where A requests a channel type for
10937                 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
10938                 // original `option_anchors` feature, which should be rejected by A as it's not supported by
10939                 // LDK.
10940                 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10941                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
10942                         10000000, 100000, 42, &config, 0, 42, None
10943                 ).unwrap();
10944
10945                 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10946
10947                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10948                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10949                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
10950                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10951                 ).unwrap();
10952
10953                 let mut accept_channel_msg = channel_b.get_accept_channel_message();
10954                 accept_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
10955
10956                 let res = channel_a.accept_channel(
10957                         &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
10958                 );
10959                 assert!(res.is_err());
10960         }
10961
10962         #[test]
10963         fn test_waiting_for_batch() {
10964                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10965                 let logger = test_utils::TestLogger::new();
10966                 let secp_ctx = Secp256k1::new();
10967                 let seed = [42; 32];
10968                 let network = Network::Testnet;
10969                 let best_block = BestBlock::from_network(network);
10970                 let chain_hash = ChainHash::using_genesis_block(network);
10971                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
10972
10973                 let mut config = UserConfig::default();
10974                 // Set trust_own_funding_0conf so that, absent batching, we would send channel_ready
10975                 // immediately; the test then verifies it is withheld until the whole batch is ready.
10976                 config.channel_handshake_limits.trust_own_funding_0conf = true;
10977
10978                 // Create a channel from node a to node b that will be part of batch funding.
10979                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
10980                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
10981                         &feeest,
10982                         &&keys_provider,
10983                         &&keys_provider,
10984                         node_b_node_id,
10985                         &channelmanager::provided_init_features(&config),
10986                         10000000,
10987                         100000,
10988                         42,
10989                         &config,
10990                         0,
10991                         42,
10992                         None
10993                 ).unwrap();
10994
10995                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
10996                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
10997                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
10998                         &feeest,
10999                         &&keys_provider,
11000                         &&keys_provider,
11001                         node_b_node_id,
11002                         &channelmanager::provided_channel_type_features(&config),
11003                         &channelmanager::provided_init_features(&config),
11004                         &open_channel_msg,
11005                         7,
11006                         &config,
11007                         0,
11008                         &&logger,
11009                         true,  // Allow node b to send a 0conf channel_ready.
11010                 ).unwrap();
11011
11012                 let accept_channel_msg = node_b_chan.accept_inbound_channel();
11013                 node_a_chan.accept_channel(
11014                         &accept_channel_msg,
11015                         &config.channel_handshake_limits,
11016                         &channelmanager::provided_init_features(&config),
11017                 ).unwrap();
11018
11019                 // Fund the channel with a batch funding transaction.
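		// A batch funding transaction carries one output per channel in the batch: output
		// 0 funds this channel, while the second (empty-script) output stands in for some
		// other channel's funding output.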
11020                 let output_script = node_a_chan.context.get_funding_redeemscript();
11021                 let tx = Transaction {
11022                         version: 1,
11023                         lock_time: LockTime::ZERO,
11024                         input: Vec::new(),
11025                         output: vec![
11026                                 TxOut {
11027                                         value: 10000000, script_pubkey: output_script.clone(),
11028                                 },
11029                                 TxOut {
11030                                         value: 10000000, script_pubkey: Builder::new().into_script(),
11031                                 },
11032                         ]};
11033                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
11034                 let funding_created_msg = node_a_chan.get_funding_created(
11035                         tx.clone(), funding_outpoint, true, &&logger,
11036                 ).map_err(|_| ()).unwrap();
11037                 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
11038                         &funding_created_msg.unwrap(),
11039                         best_block,
11040                         &&keys_provider,
11041                         &&logger,
11042                 ).map_err(|_| ()).unwrap();
11043                 let node_b_updates = node_b_chan.monitor_updating_restored(
11044                         &&logger,
11045                         &&keys_provider,
11046                         chain_hash,
11047                         &config,
11048                         0,
11049                 );
11050
11051                 // Receive funding_signed; the channel is configured to hold off on sending channel_ready
11052                 // and broadcasting the funding transaction until the whole batch is ready.
11053                 let res = node_a_chan.funding_signed(
11054                         &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
11055                 );
11056                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
11057                 let node_a_updates = node_a_chan.monitor_updating_restored(
11058                         &&logger,
11059                         &&keys_provider,
11060                         chain_hash,
11061                         &config,
11062                         0,
11063                 );
11064                 // Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
11065                 // as the funding transaction depends on all channels in the batch becoming ready.
11066                 assert!(node_a_updates.channel_ready.is_none());
11067                 assert!(node_a_updates.funding_broadcastable.is_none());
11068                 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
11069
11070                 // It is possible to receive a 0conf channel_ready from the remote node.
11071                 node_a_chan.channel_ready(
11072                         &node_b_updates.channel_ready.unwrap(),
11073                         &&keys_provider,
11074                         chain_hash,
11075                         &config,
11076                         &best_block,
11077                         &&logger,
11078                 ).unwrap();
11079                 assert_eq!(
11080                         node_a_chan.context.channel_state,
11081                         ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
11082                 );
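		// Both flags remain set: we are still waiting on the rest of the batch, but have
		// now recorded the counterparty's channel_ready.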
11083
11084                 // The WAITING_FOR_BATCH flag is only cleared when the ChannelManager calls set_batch_ready.
11085                 node_a_chan.set_batch_ready();
11086                 assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
11087                 assert!(node_a_chan.check_get_channel_ready(0).is_some());
11088         }
11089 }