// This file is Copyright its original authors, visible in version control
// history.
//
// This file is licensed under the Apache License, Version 2.0 <LICENSE-APACHE
// or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// You may not use this file except in accordance with one or both of these
// licenses.

use bitcoin::blockdata::constants::ChainHash;
use bitcoin::blockdata::script::{Script, ScriptBuf, Builder};
use bitcoin::blockdata::transaction::Transaction;
use bitcoin::sighash;
use bitcoin::sighash::EcdsaSighashType;
use bitcoin::consensus::encode;

use bitcoin::hashes::Hash;
use bitcoin::hashes::sha256::Hash as Sha256;
use bitcoin::hashes::sha256d::Hash as Sha256d;
use bitcoin::hash_types::{Txid, BlockHash};

use bitcoin::secp256k1::constants::PUBLIC_KEY_SIZE;
use bitcoin::secp256k1::{PublicKey, SecretKey};
use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
use bitcoin::secp256k1;

use crate::ln::{ChannelId, PaymentPreimage, PaymentHash};
use crate::ln::features::{ChannelTypeFeatures, InitFeatures};
use crate::ln::msgs;
use crate::ln::msgs::DecodeError;
use crate::ln::script::{self, ShutdownScript};
use crate::ln::channelmanager::{self, CounterpartyForwardingInfo, PendingHTLCStatus, HTLCSource, SentHTLCId, HTLCFailureMsg, PendingHTLCInfo, RAACommitmentOrder, BREAKDOWN_TIMEOUT, MIN_CLTV_EXPIRY_DELTA, MAX_LOCAL_BREAKDOWN_TIMEOUT, ChannelShutdownState};
use crate::ln::chan_utils::{CounterpartyCommitmentSecrets, TxCreationKeys, HTLCOutputInCommitment, htlc_success_tx_weight, htlc_timeout_tx_weight, make_funding_redeemscript, ChannelPublicKeys, CommitmentTransaction, HolderCommitmentTransaction, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, MAX_HTLCS, get_commitment_transaction_number_obscure_factor, ClosingTransaction};
use crate::ln::chan_utils;
use crate::ln::onion_utils::HTLCFailReason;
use crate::chain::BestBlock;
use crate::chain::chaininterface::{FeeEstimator, ConfirmationTarget, LowerBoundedFeeEstimator};
use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, LATENCY_GRACE_PERIOD_BLOCKS, CLOSED_CHANNEL_UPDATE_ID};
use crate::chain::transaction::{OutPoint, TransactionData};
use crate::sign::ecdsa::{EcdsaChannelSigner, WriteableEcdsaChannelSigner};
use crate::sign::{EntropySource, ChannelSigner, SignerProvider, NodeSigner, Recipient};
use crate::events::ClosureReason;
use crate::routing::gossip::NodeId;
use crate::util::ser::{Readable, ReadableArgs, Writeable, Writer};
use crate::util::logger::{Logger, Record, WithContext};
use crate::util::errors::APIError;
use crate::util::config::{UserConfig, ChannelConfig, LegacyChannelConfig, ChannelHandshakeConfig, ChannelHandshakeLimits, MaxDustHTLCExposure};
use crate::util::scid_utils::scid_from_parts;

use crate::io;
use crate::prelude::*;
use core::{cmp, mem, fmt};
use core::convert::TryInto;
use core::ops::Deref;
#[cfg(any(test, fuzzing, debug_assertions))]
use crate::sync::Mutex;
use crate::sign::type_resolver::ChannelSignerType;

use super::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint, RevocationBasepoint};

#[cfg(test)]
pub struct ChannelValueStat {
	pub value_to_self_msat: u64,
	pub channel_value_msat: u64,
	pub channel_reserve_msat: u64,
	pub pending_outbound_htlcs_amount_msat: u64,
	pub pending_inbound_htlcs_amount_msat: u64,
	pub holding_cell_outbound_amount_msat: u64,
	pub counterparty_max_htlc_value_in_flight_msat: u64, // outgoing
	pub counterparty_dust_limit_msat: u64,
}

pub struct AvailableBalances {
	/// The amount that would go to us if we close the channel, ignoring any on-chain fees.
	pub balance_msat: u64,
	/// Total amount available for our counterparty to send to us.
	pub inbound_capacity_msat: u64,
	/// Total amount available for us to send to our counterparty.
	pub outbound_capacity_msat: u64,
	/// The maximum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_limit_msat: u64,
	/// The minimum value we can assign to the next outbound HTLC.
	pub next_outbound_htlc_minimum_msat: u64,
}

#[derive(Debug, Clone, Copy, PartialEq)]
enum FeeUpdateState {
	// Inbound states mirroring InboundHTLCState
	RemoteAnnounced,
	AwaitingRemoteRevokeToAnnounce,
	// Note that we do not have an AwaitingAnnouncedRemoteRevoke variant here as it is universally
	// handled the same as `Committed`, with the only exception in `InboundHTLCState` being the
	// distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding"
	// the fee update anywhere, we can simply consider the fee update `Committed` immediately
	// instead of setting it to AwaitingAnnouncedRemoteRevoke.

	// Outbound state can only be `LocalAnnounced` or `Committed`
	Outbound,
}

enum InboundHTLCRemovalReason {
	FailRelay(msgs::OnionErrorPacket),
	FailMalformed(([u8; 32], u16)),
	Fulfill(PaymentPreimage),
}

enum InboundHTLCState {
	/// Offered by remote, to be included in next local commitment tx. I.e., the remote sent an
	/// update_add_htlc message for this HTLC.
	RemoteAnnounced(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've
	/// revoke_and_ack'd it), but the remote hasn't yet revoked their previous
	/// state (see the example below). We have not yet included this HTLC in a
	/// commitment_signed message because we are waiting on the remote's
	/// aforementioned state revocation. One reason this missing remote RAA
	/// (revoke_and_ack) blocks us from constructing a commitment_signed message
	/// is because every time we create a new "state", i.e. every time we sign a
	/// new commitment tx (see [BOLT #2]), we need a new per_commitment_point,
	/// which is provided one-at-a-time in each RAA. E.g., the last RAA they
	/// sent provided the per_commitment_point for our current commitment tx.
	/// The other reason we should not send a commitment_signed without their RAA
	/// is because their RAA serves to ACK our previous commitment_signed.
	///
	/// Here's an example of how an HTLC could come to be in this state:
	/// remote --> update_add_htlc(prev_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc) --> local
	/// remote <-- revoke_and_ack               <-- local
	/// remote <-- commitment_signed(prev_htlc) <-- local
	/// [note that here, the remote does not respond with a RAA]
	/// remote --> update_add_htlc(this_htlc)   --> local
	/// remote --> commitment_signed(prev_htlc, this_htlc) --> local
	/// Now `this_htlc` will be assigned this state. It's unable to be officially
	/// accepted, i.e. included in a commitment_signed, because we're missing the
	/// RAA that provides our next per_commitment_point. The per_commitment_point
	/// is used to derive commitment keys, which are used to construct the
	/// signatures in a commitment_signed message.
	/// Implies AwaitingRemoteRevoke.
	///
	/// [BOLT #2]: https://github.com/lightning/bolts/blob/master/02-peer-protocol.md
	AwaitingRemoteRevokeToAnnounce(PendingHTLCStatus),
	/// Included in a received commitment_signed message (implying we've revoke_and_ack'd it).
	/// We have also included this HTLC in our latest commitment_signed and are now just waiting
	/// on the remote's revoke_and_ack to make this HTLC an irrevocable part of the state of the
	/// channel (before it can then get forwarded and/or removed).
	/// Implies AwaitingRemoteRevoke.
	AwaitingAnnouncedRemoteRevoke(PendingHTLCStatus),
	Committed,
	/// Removed by us and a new commitment_signed was sent (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we'll drop it.
	/// Note that we have to keep an eye on the HTLC until we've received a broadcastable
	/// commitment transaction without it as otherwise we'll have to force-close the channel to
	/// claim it before the timeout (obviously doesn't apply to revoked HTLCs that we can't claim
	/// anyway). That said, ChannelMonitor does this for us (see
	/// ChannelMonitor::should_broadcast_holder_commitment_txn) so we actually remove the HTLC from
	/// our own local state before then, once we're sure that the next commitment_signed and
	/// ChannelMonitor::provide_latest_local_commitment_tx will not include this HTLC.
	LocalRemoved(InboundHTLCRemovalReason),
}

/// Exposes the state of pending inbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum InboundHTLCStateDetails {
	/// We have added this HTLC in our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// before this HTLC is included on the remote commitment transaction.
	AwaitingRemoteRevokeToAdd,
	/// This HTLC has been included in the commitment_signed and revoke_and_ack messages on both sides
	/// and is included in both commitment transactions.
	///
	/// This HTLC is now safe to either forward or be claimed as a payment by us. The HTLC will
	/// remain in this state until the forwarded upstream HTLC has been resolved and we resolve this
	/// HTLC correspondingly, or until we claim it as a payment. If it is part of a multipart
	/// payment, it will only be claimed together with other required parts.
	Committed,
	/// We have received the preimage for this HTLC and it is being removed by fulfilling it with
	/// update_fulfill_htlc. This HTLC is still on both commitment transactions, but we are awaiting
	/// the appropriate revoke_and_ack's from the remote before this HTLC is removed from the remote
	/// commitment transaction after update_fulfill_htlc.
	AwaitingRemoteRevokeToRemoveFulfill,
	/// The HTLC is being removed by failing it with update_fail_htlc or update_fail_malformed_htlc.
	/// This HTLC is still on both commitment transactions, but we are awaiting the appropriate
	/// revoke_and_ack's from the remote before this HTLC is removed from the remote commitment
	/// transaction.
	AwaitingRemoteRevokeToRemoveFail,
}

impl From<&InboundHTLCState> for Option<InboundHTLCStateDetails> {
	fn from(state: &InboundHTLCState) -> Option<InboundHTLCStateDetails> {
		match state {
			InboundHTLCState::RemoteAnnounced(_) => None,
			InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
			InboundHTLCState::Committed =>
				Some(InboundHTLCStateDetails::Committed),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail),
			InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) =>
				Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill),
		}
	}
}

impl_writeable_tlv_based_enum_upgradable!(InboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveFulfill) => {},
	(6, AwaitingRemoteRevokeToRemoveFail) => {};
);
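
// Editor's note: a minimal sketch (not part of upstream) exercising the
// `InboundHTLCState` -> `InboundHTLCStateDetails` mapping above. Note in
// particular that `RemoteAnnounced` maps to `None`, as such an HTLC is not yet
// part of any commitment transaction.
#[cfg(test)]
#[test]
fn inbound_state_details_mapping_example() {
	let committed: Option<InboundHTLCStateDetails> = (&InboundHTLCState::Committed).into();
	assert_eq!(committed, Some(InboundHTLCStateDetails::Committed));

	let fulfill = InboundHTLCState::LocalRemoved(
		InboundHTLCRemovalReason::Fulfill(PaymentPreimage([0; 32])));
	let details: Option<InboundHTLCStateDetails> = (&fulfill).into();
	assert_eq!(details, Some(InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill));
}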

struct InboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: InboundHTLCState,
}

/// Exposes details around pending inbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct InboundHTLCDetails {
	/// The HTLC ID.
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	pub htlc_id: u64,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`InboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<InboundHTLCStateDetails>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcast as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
}

impl_writeable_tlv_based!(InboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, is_dust, required),
});

#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum OutboundHTLCState {
	/// Added by us and included in a commitment_signed (if we were AwaitingRemoteRevoke when we
	/// created it we would have put it in the holding cell instead). When they next revoke_and_ack
	/// we will promote to Committed (note that they may not accept it until the next time we
	/// revoke, but we don't really care about that:
	///  * they've revoked, so worst case we can announce an old state and get our (option on)
	///    money back (though we won't), and,
	///  * we'll send them a revoke when they send a commitment_signed, and since only they're
	///    allowed to remove it, the "can only be removed once committed on both sides" requirement
	///    doesn't matter to us and it's up to them to enforce it, worst-case they jump ahead but
	///    we'll never get out of sync).
	/// Note that we Box the OnionPacket as it's rather large and we don't want to blow up
	/// OutboundHTLCOutput's size just for a temporary bit.
	LocalAnnounced(Box<msgs::OnionPacket>),
	Committed,
	/// Remote removed this (outbound) HTLC. We're waiting on their commitment_signed to finalize
	/// the change (though they'll need to revoke before we fail the payment).
	RemoteRemoved(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We also have not yet removed this HTLC in a commitment_signed message, and are waiting on a
	/// remote revoke_and_ack on a previous state before we can do so.
	AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome),
	/// Remote removed this and sent a commitment_signed (implying we've revoke_and_ack'ed it), but
	/// the remote side hasn't yet revoked their previous state, which we need them to do before we
	/// can do any backwards failing. Implies AwaitingRemoteRevoke.
	/// We have removed this HTLC in our latest commitment_signed and are now just waiting on a
	/// revoke_and_ack to drop completely.
	AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome),
}

/// Exposes the state of pending outbound HTLCs.
///
/// At a high level, an HTLC being forwarded from one Lightning node to another Lightning node goes
/// through the following states in the state machine:
/// - Announced for addition by the originating node through the update_add_htlc message.
/// - Added to the commitment transaction of the receiving node and originating node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
/// - Announced for resolution (fulfillment or failure) by the receiving node through either one of
///   the update_fulfill_htlc, update_fail_htlc, and update_fail_malformed_htlc messages.
/// - Removed from the commitment transaction of the originating node and receiving node in turn
///   through the exchange of commitment_signed and revoke_and_ack messages.
///
/// This can be used to inspect what next message an HTLC is waiting for to advance its state.
#[derive(Clone, Debug, PartialEq)]
pub enum OutboundHTLCStateDetails {
	/// We are awaiting the appropriate revoke_and_ack's from the remote before the HTLC is added
	/// on the remote's commitment transaction after update_add_htlc.
	AwaitingRemoteRevokeToAdd,
	/// The HTLC has been added to the remote's commitment transaction by sending commitment_signed
	/// and receiving revoke_and_ack in return.
	///
	/// The HTLC will remain in this state until the remote node resolves the HTLC, or until we
	/// unilaterally close the channel due to a timeout with an uncooperative remote node.
	Committed,
	/// The HTLC has been fulfilled successfully by the remote with a preimage in update_fulfill_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveSuccess,
	/// The HTLC has been failed by the remote with update_fail_htlc or update_fail_malformed_htlc,
	/// and we removed the HTLC from our commitment transaction by receiving commitment_signed and
	/// returning revoke_and_ack. We are awaiting the appropriate revoke_and_ack's from the remote
	/// for the removal from its commitment transaction.
	AwaitingRemoteRevokeToRemoveFailure,
}

impl From<&OutboundHTLCState> for OutboundHTLCStateDetails {
	fn from(state: &OutboundHTLCState) -> OutboundHTLCStateDetails {
		match state {
			OutboundHTLCState::LocalAnnounced(_) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd,
			OutboundHTLCState::Committed =>
				OutboundHTLCStateDetails::Committed,
			// RemoteRemoved states are ignored as the state is transient and the remote has not committed to
			// the state yet.
			OutboundHTLCState::RemoteRemoved(_) =>
				OutboundHTLCStateDetails::Committed,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveSuccess,
			OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Failure(_)) =>
				OutboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFailure,
		}
	}
}

impl_writeable_tlv_based_enum_upgradable!(OutboundHTLCStateDetails,
	(0, AwaitingRemoteRevokeToAdd) => {},
	(2, Committed) => {},
	(4, AwaitingRemoteRevokeToRemoveSuccess) => {},
	(6, AwaitingRemoteRevokeToRemoveFailure) => {};
);
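
// Editor's note: a minimal sketch (not part of upstream) of the one subtle case in
// the mapping above: `RemoteRemoved` is transient (the remote has not yet committed
// to the removal), so it is still reported as `Committed`.
#[cfg(test)]
#[test]
fn outbound_state_details_mapping_example() {
	let state = OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(None));
	assert_eq!(OutboundHTLCStateDetails::from(&state), OutboundHTLCStateDetails::Committed);
}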

#[derive(Clone)]
#[cfg_attr(test, derive(Debug, PartialEq))]
enum OutboundHTLCOutcome {
	/// LDK version 0.0.105+ will always fill in the preimage here.
	Success(Option<PaymentPreimage>),
	Failure(HTLCFailReason),
}

impl From<Option<HTLCFailReason>> for OutboundHTLCOutcome {
	fn from(o: Option<HTLCFailReason>) -> Self {
		match o {
			None => OutboundHTLCOutcome::Success(None),
			Some(r) => OutboundHTLCOutcome::Failure(r)
		}
	}
}

impl<'a> Into<Option<&'a HTLCFailReason>> for &'a OutboundHTLCOutcome {
	fn into(self) -> Option<&'a HTLCFailReason> {
		match self {
			OutboundHTLCOutcome::Success(_) => None,
			OutboundHTLCOutcome::Failure(ref r) => Some(r)
		}
	}
}

#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
struct OutboundHTLCOutput {
	htlc_id: u64,
	amount_msat: u64,
	cltv_expiry: u32,
	payment_hash: PaymentHash,
	state: OutboundHTLCState,
	source: HTLCSource,
	blinding_point: Option<PublicKey>,
	skimmed_fee_msat: Option<u64>,
}

/// Exposes details around pending outbound HTLCs.
#[derive(Clone, Debug, PartialEq)]
pub struct OutboundHTLCDetails {
	/// The HTLC ID.
	/// The IDs are incremented by 1 starting from 0 for each offered HTLC.
	/// They are unique per channel and inbound/outbound direction, unless an HTLC was only announced
	/// and not part of any commitment transaction.
	///
	/// Not present when we are awaiting a remote revocation and the HTLC is not added yet.
	pub htlc_id: Option<u64>,
	/// The amount in msat.
	pub amount_msat: u64,
	/// The block height at which this HTLC expires.
	pub cltv_expiry: u32,
	/// The payment hash.
	pub payment_hash: PaymentHash,
	/// The state of the HTLC in the state machine.
	///
	/// Determines on which commitment transactions the HTLC is included and what message the HTLC is
	/// waiting for to advance to the next state.
	///
	/// See [`OutboundHTLCStateDetails`] for information on the specific states.
	///
	/// LDK will always fill this field in, but when downgrading to prior versions of LDK, new
	/// states may result in `None` here.
	pub state: Option<OutboundHTLCStateDetails>,
	/// The extra fee being skimmed off the top of this HTLC.
	pub skimmed_fee_msat: Option<u64>,
	/// Whether the HTLC has an output below the local dust limit. If so, the output will be trimmed
	/// from the local commitment transaction and added to the commitment transaction fee.
	/// For non-anchor channels, this takes into account the cost of the second-stage HTLC
	/// transactions as well.
	///
	/// When the local commitment transaction is broadcast as part of a unilateral closure,
	/// the value of this HTLC will therefore not be claimable but instead burned as a transaction
	/// fee.
	///
	/// Note that dust limits are specific to each party. An HTLC can be dust for the local
	/// commitment transaction but not for the counterparty's commitment transaction and vice versa.
	pub is_dust: bool,
}

impl_writeable_tlv_based!(OutboundHTLCDetails, {
	(0, htlc_id, required),
	(2, amount_msat, required),
	(4, cltv_expiry, required),
	(6, payment_hash, required),
	(7, state, upgradable_option),
	(8, skimmed_fee_msat, required),
	(10, is_dust, required),
});

/// See AwaitingRemoteRevoke ChannelState for more info
#[cfg_attr(test, derive(Clone, Debug, PartialEq))]
enum HTLCUpdateAwaitingACK {
	AddHTLC { // TODO: Time out if we're getting close to cltv_expiry
		// always outbound
		amount_msat: u64,
		cltv_expiry: u32,
		payment_hash: PaymentHash,
		source: HTLCSource,
		onion_routing_packet: msgs::OnionPacket,
		// The extra fee we're skimming off the top of this HTLC.
		skimmed_fee_msat: Option<u64>,
		blinding_point: Option<PublicKey>,
	},
	ClaimHTLC {
		payment_preimage: PaymentPreimage,
		htlc_id: u64,
	},
	FailHTLC {
		htlc_id: u64,
		err_packet: msgs::OnionErrorPacket,
	},
	FailMalformedHTLC {
		htlc_id: u64,
		failure_code: u16,
		sha256_of_onion: [u8; 32],
	},
}

macro_rules! define_state_flags {
	($flag_type_doc: expr, $flag_type: ident, [$(($flag_doc: expr, $flag: ident, $value: expr, $get: ident, $set: ident, $clear: ident)),+], $extra_flags: expr) => {
		#[doc = $flag_type_doc]
		#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
		struct $flag_type(u32);

		impl $flag_type {
			$(
				#[doc = $flag_doc]
				const $flag: $flag_type = $flag_type($value);
			)*

			/// All flags that apply to the specified [`ChannelState`] variant.
			#[allow(unused)]
			const ALL: $flag_type = Self($(Self::$flag.0 | )* $extra_flags);

			#[allow(unused)]
			fn new() -> Self { Self(0) }

			#[allow(unused)]
			fn from_u32(flags: u32) -> Result<Self, ()> {
				if flags & !Self::ALL.0 != 0 {
					Err(())
				} else {
					Ok($flag_type(flags))
				}
			}

			#[allow(unused)]
			fn is_empty(&self) -> bool { self.0 == 0 }
			#[allow(unused)]
			fn is_set(&self, flag: Self) -> bool { *self & flag == flag }
			#[allow(unused)]
			fn set(&mut self, flag: Self) { *self |= flag }
			#[allow(unused)]
			fn clear(&mut self, flag: Self) -> Self { self.0 &= !flag.0; *self }
		}

		$(
			define_state_flags!($flag_type, Self::$flag, $get, $set, $clear);
		)*

		impl core::ops::BitOr for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: Self) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign for $flag_type {
			fn bitor_assign(&mut self, rhs: Self) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: Self) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign for $flag_type {
			fn bitand_assign(&mut self, rhs: Self) { self.0 &= rhs.0; }
		}
	};
	($flag_type_doc: expr, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, 0);
	};
	($flag_type: ident, $flag: expr, $get: ident, $set: ident, $clear: ident) => {
		impl $flag_type {
			#[allow(unused)]
			fn $get(&self) -> bool { self.is_set($flag_type::new() | $flag) }
			#[allow(unused)]
			fn $set(&mut self) { self.set($flag_type::new() | $flag) }
			#[allow(unused)]
			fn $clear(&mut self) -> Self { self.clear($flag_type::new() | $flag) }
		}
	};
	($flag_type_doc: expr, FUNDED_STATE, $flag_type: ident, $flags: tt) => {
		define_state_flags!($flag_type_doc, $flag_type, $flags, FundedStateFlags::ALL.0);

		define_state_flags!($flag_type, FundedStateFlags::PEER_DISCONNECTED,
			is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected);
		define_state_flags!($flag_type, FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS,
			is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress);
		define_state_flags!($flag_type, FundedStateFlags::REMOTE_SHUTDOWN_SENT,
			is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent);
		define_state_flags!($flag_type, FundedStateFlags::LOCAL_SHUTDOWN_SENT,
			is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent);

		impl core::ops::BitOr<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitor(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 | rhs.0) }
		}
		impl core::ops::BitOrAssign<FundedStateFlags> for $flag_type {
			fn bitor_assign(&mut self, rhs: FundedStateFlags) { self.0 |= rhs.0; }
		}
		impl core::ops::BitAnd<FundedStateFlags> for $flag_type {
			type Output = Self;
			fn bitand(self, rhs: FundedStateFlags) -> Self::Output { Self(self.0 & rhs.0) }
		}
		impl core::ops::BitAndAssign<FundedStateFlags> for $flag_type {
			fn bitand_assign(&mut self, rhs: FundedStateFlags) { self.0 &= rhs.0; }
		}
		impl PartialEq<FundedStateFlags> for $flag_type {
			fn eq(&self, other: &FundedStateFlags) -> bool { self.0 == other.0 }
		}
		impl From<FundedStateFlags> for $flag_type {
			fn from(flags: FundedStateFlags) -> Self { Self(flags.0) }
		}
	};
}

/// We declare all the states/flags here together to help determine which bits are still available
/// to choose.
mod state_flags {
	pub const OUR_INIT_SENT: u32 = 1 << 0;
	pub const THEIR_INIT_SENT: u32 = 1 << 1;
	pub const FUNDING_NEGOTIATED: u32 = 1 << 2;
	pub const AWAITING_CHANNEL_READY: u32 = 1 << 3;
	pub const THEIR_CHANNEL_READY: u32 = 1 << 4;
	pub const OUR_CHANNEL_READY: u32 = 1 << 5;
	pub const CHANNEL_READY: u32 = 1 << 6;
	pub const PEER_DISCONNECTED: u32 = 1 << 7;
	pub const MONITOR_UPDATE_IN_PROGRESS: u32 = 1 << 8;
	pub const AWAITING_REMOTE_REVOKE: u32 = 1 << 9;
	pub const REMOTE_SHUTDOWN_SENT: u32 = 1 << 10;
	pub const LOCAL_SHUTDOWN_SENT: u32 = 1 << 11;
	pub const SHUTDOWN_COMPLETE: u32 = 1 << 12;
	pub const WAITING_FOR_BATCH: u32 = 1 << 13;
}
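
// Editor's note: a small sanity-check sketch (not part of upstream) showing that
// each state/flag above occupies a distinct bit. This is what allows
// `ChannelState::to_u32`/`from_u32` below to pack a variant and its flags into a
// single `u32`.
#[cfg(test)]
#[test]
fn state_flag_bits_are_distinct_example() {
	let all_flags = [
		state_flags::OUR_INIT_SENT, state_flags::THEIR_INIT_SENT,
		state_flags::FUNDING_NEGOTIATED, state_flags::AWAITING_CHANNEL_READY,
		state_flags::THEIR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
		state_flags::CHANNEL_READY, state_flags::PEER_DISCONNECTED,
		state_flags::MONITOR_UPDATE_IN_PROGRESS, state_flags::AWAITING_REMOTE_REVOKE,
		state_flags::REMOTE_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
		state_flags::SHUTDOWN_COMPLETE, state_flags::WAITING_FOR_BATCH,
	];
	let mut seen = 0u32;
	for &flag in all_flags.iter() {
		assert_eq!(seen & flag, 0, "state/flag bits must not overlap");
		seen |= flag;
	}
}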

define_state_flags!(
	"Flags that apply to all [`ChannelState`] variants in which the channel is funded.",
	FundedStateFlags, [
		("Indicates the remote side is considered \"disconnected\" and no updates are allowed \
			until after we've done a `channel_reestablish` dance.", PEER_DISCONNECTED, state_flags::PEER_DISCONNECTED,
			is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected),
		("Indicates the user has told us a `ChannelMonitor` update is pending async persistence \
			somewhere and we should pause sending any outbound messages until they've managed to \
			complete it.", MONITOR_UPDATE_IN_PROGRESS, state_flags::MONITOR_UPDATE_IN_PROGRESS,
			is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress),
		("Indicates we received a `shutdown` message from the remote end. If set, they may not add \
			any new HTLCs to the channel, and we are expected to respond with our own `shutdown` \
			message when possible.", REMOTE_SHUTDOWN_SENT, state_flags::REMOTE_SHUTDOWN_SENT,
			is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent),
		("Indicates we sent a `shutdown` message. At this point, we may not add any new HTLCs to \
			the channel.", LOCAL_SHUTDOWN_SENT, state_flags::LOCAL_SHUTDOWN_SENT,
			is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent)
	]
);

define_state_flags!(
	"Flags that only apply to [`ChannelState::NegotiatingFunding`].",
	NegotiatingFundingFlags, [
		("Indicates we have (or are prepared to) send our `open_channel`/`accept_channel` message.",
			OUR_INIT_SENT, state_flags::OUR_INIT_SENT, is_our_init_sent, set_our_init_sent, clear_our_init_sent),
		("Indicates we have received their `open_channel`/`accept_channel` message.",
			THEIR_INIT_SENT, state_flags::THEIR_INIT_SENT, is_their_init_sent, set_their_init_sent, clear_their_init_sent)
	]
);
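
// Editor's note: a short usage sketch (not part of upstream) of the accessors that
// `define_state_flags!` generates: each flag gets `is_*`, `set_*`, and `clear_*`
// helpers on the generated type.
#[cfg(test)]
#[test]
fn negotiating_funding_flags_accessors_example() {
	let mut flags = NegotiatingFundingFlags::new();
	assert!(flags.is_empty());
	flags.set_our_init_sent();
	assert!(flags.is_our_init_sent() && !flags.is_their_init_sent());
	flags.clear_our_init_sent();
	assert!(flags.is_empty());
}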

define_state_flags!(
	"Flags that only apply to [`ChannelState::AwaitingChannelReady`].",
	FUNDED_STATE, AwaitingChannelReadyFlags, [
		("Indicates they sent us a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			THEIR_CHANNEL_READY, state_flags::THEIR_CHANNEL_READY,
			is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready),
		("Indicates we sent them a `channel_ready` message. Once both `THEIR_CHANNEL_READY` and \
			`OUR_CHANNEL_READY` are set, our state moves on to `ChannelReady`.",
			OUR_CHANNEL_READY, state_flags::OUR_CHANNEL_READY,
			is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready),
		("Indicates the channel was funded in a batch and the broadcast of the funding transaction \
			is being held until all channels in the batch have received `funding_signed` and have \
			their monitors persisted.", WAITING_FOR_BATCH, state_flags::WAITING_FOR_BATCH,
			is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch)
	]
);

define_state_flags!(
	"Flags that only apply to [`ChannelState::ChannelReady`].",
	FUNDED_STATE, ChannelReadyFlags, [
		("Indicates that we have sent a `commitment_signed` but are awaiting the responding \
			`revoke_and_ack` message. During this period, we can't generate new `commitment_signed` \
			messages as we'd be unable to determine which HTLCs they included in their `revoke_and_ack` \
			implicit ACK, so instead we have to hold them away temporarily to be sent later.",
			AWAITING_REMOTE_REVOKE, state_flags::AWAITING_REMOTE_REVOKE,
			is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke)
	]
);

// Note that the order of this enum is implicitly defined by where each variant is placed. Take this
// into account when introducing new states and update `test_channel_state_order` accordingly.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq)]
enum ChannelState {
	/// We are negotiating the parameters required for the channel prior to funding it.
	NegotiatingFunding(NegotiatingFundingFlags),
	/// We have sent `funding_created` and are awaiting a `funding_signed` to advance to
	/// `AwaitingChannelReady`. Note that this is nonsense for an inbound channel as we immediately generate
	/// `funding_signed` upon receipt of `funding_created`, so simply skip this state.
	FundingNegotiated,
	/// We've received/sent `funding_created` and `funding_signed` and are thus now waiting on the
	/// funding transaction to confirm.
	AwaitingChannelReady(AwaitingChannelReadyFlags),
	/// Both we and our counterparty consider the funding transaction confirmed and the channel is
	/// now operational.
	ChannelReady(ChannelReadyFlags),
	/// We've successfully negotiated a `closing_signed` dance. At this point, the `ChannelManager`
	/// is about to drop us, but we store this anyway.
	ShutdownComplete,
}
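
// Editor's note: a minimal sketch (not part of upstream) of the ordering property
// noted above: the derived `PartialOrd` compares variants by declaration order, so
// earlier lifecycle states compare less than later ones.
#[cfg(test)]
#[test]
fn channel_state_variant_order_example() {
	assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new())
		< ChannelState::FundingNegotiated);
	assert!(ChannelState::ChannelReady(ChannelReadyFlags::new())
		< ChannelState::ShutdownComplete);
}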

macro_rules! impl_state_flag {
	($get: ident, $set: ident, $clear: ident, [$($state: ident),+]) => {
		#[allow(unused)]
		fn $get(&self) -> bool {
			match self {
				$(
					ChannelState::$state(flags) => flags.$get(),
				)*
				_ => false,
			}
		}
		#[allow(unused)]
		fn $set(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => flags.$set(),
				)*
				_ => debug_assert!(false, "Attempted to set flag on unexpected ChannelState"),
			}
		}
		#[allow(unused)]
		fn $clear(&mut self) {
			match self {
				$(
					ChannelState::$state(flags) => { let _ = flags.$clear(); },
				)*
				_ => debug_assert!(false, "Attempted to clear flag on unexpected ChannelState"),
			}
		}
	};
	($get: ident, $set: ident, $clear: ident, FUNDED_STATES) => {
		impl_state_flag!($get, $set, $clear, [AwaitingChannelReady, ChannelReady]);
	};
	($get: ident, $set: ident, $clear: ident, $state: ident) => {
		impl_state_flag!($get, $set, $clear, [$state]);
	};
}

impl ChannelState {
	fn from_u32(state: u32) -> Result<Self, ()> {
		match state {
			state_flags::FUNDING_NEGOTIATED => Ok(ChannelState::FundingNegotiated),
			state_flags::SHUTDOWN_COMPLETE => Ok(ChannelState::ShutdownComplete),
			val => {
				if val & state_flags::AWAITING_CHANNEL_READY == state_flags::AWAITING_CHANNEL_READY {
					AwaitingChannelReadyFlags::from_u32(val & !state_flags::AWAITING_CHANNEL_READY)
						.map(|flags| ChannelState::AwaitingChannelReady(flags))
				} else if val & state_flags::CHANNEL_READY == state_flags::CHANNEL_READY {
					ChannelReadyFlags::from_u32(val & !state_flags::CHANNEL_READY)
						.map(|flags| ChannelState::ChannelReady(flags))
				} else if let Ok(flags) = NegotiatingFundingFlags::from_u32(val) {
					Ok(ChannelState::NegotiatingFunding(flags))
				} else {
					Err(())
				}
			},
		}
	}

	fn to_u32(&self) -> u32 {
		match self {
			ChannelState::NegotiatingFunding(flags) => flags.0,
			ChannelState::FundingNegotiated => state_flags::FUNDING_NEGOTIATED,
			ChannelState::AwaitingChannelReady(flags) => state_flags::AWAITING_CHANNEL_READY | flags.0,
			ChannelState::ChannelReady(flags) => state_flags::CHANNEL_READY | flags.0,
			ChannelState::ShutdownComplete => state_flags::SHUTDOWN_COMPLETE,
		}
	}

	fn is_pre_funded_state(&self) -> bool {
		matches!(self, ChannelState::NegotiatingFunding(_)|ChannelState::FundingNegotiated)
	}

	fn is_both_sides_shutdown(&self) -> bool {
		self.is_local_shutdown_sent() && self.is_remote_shutdown_sent()
	}

	fn with_funded_state_flags_mask(&self) -> FundedStateFlags {
		match self {
			ChannelState::AwaitingChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			ChannelState::ChannelReady(flags) => FundedStateFlags((*flags & FundedStateFlags::ALL).0),
			_ => FundedStateFlags::new(),
		}
	}

	fn can_generate_new_commitment(&self) -> bool {
		match self {
			ChannelState::ChannelReady(flags) =>
				!flags.is_set(ChannelReadyFlags::AWAITING_REMOTE_REVOKE) &&
					!flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) &&
					!flags.is_set(FundedStateFlags::PEER_DISCONNECTED.into()),
			_ => {
				debug_assert!(false, "Can only generate new commitment within ChannelReady");
				false
			},
		}
	}

	impl_state_flag!(is_peer_disconnected, set_peer_disconnected, clear_peer_disconnected, FUNDED_STATES);
	impl_state_flag!(is_monitor_update_in_progress, set_monitor_update_in_progress, clear_monitor_update_in_progress, FUNDED_STATES);
	impl_state_flag!(is_local_shutdown_sent, set_local_shutdown_sent, clear_local_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_remote_shutdown_sent, set_remote_shutdown_sent, clear_remote_shutdown_sent, FUNDED_STATES);
	impl_state_flag!(is_our_channel_ready, set_our_channel_ready, clear_our_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_their_channel_ready, set_their_channel_ready, clear_their_channel_ready, AwaitingChannelReady);
	impl_state_flag!(is_waiting_for_batch, set_waiting_for_batch, clear_waiting_for_batch, AwaitingChannelReady);
	impl_state_flag!(is_awaiting_remote_revoke, set_awaiting_remote_revoke, clear_awaiting_remote_revoke, ChannelReady);
}
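
// Editor's note: a round-trip sketch (not part of upstream) for the
// `to_u32`/`from_u32` pair above, showing that a funded state and its flags survive
// the packed-`u32` encoding.
#[cfg(test)]
#[test]
fn channel_state_u32_round_trip_example() {
	let mut state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
	state.set_our_channel_ready();
	state.set_peer_disconnected();
	assert_eq!(ChannelState::from_u32(state.to_u32()), Ok(state));
}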

pub const INITIAL_COMMITMENT_NUMBER: u64 = (1 << 48) - 1;

pub const DEFAULT_MAX_HTLCS: u16 = 50;

pub(crate) fn commitment_tx_base_weight(channel_type_features: &ChannelTypeFeatures) -> u64 {
	const COMMITMENT_TX_BASE_WEIGHT: u64 = 724;
	const COMMITMENT_TX_BASE_ANCHOR_WEIGHT: u64 = 1124;
	if channel_type_features.supports_anchors_zero_fee_htlc_tx() { COMMITMENT_TX_BASE_ANCHOR_WEIGHT } else { COMMITMENT_TX_BASE_WEIGHT }
}

#[cfg(not(test))]
const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
#[cfg(test)]
pub const COMMITMENT_TX_WEIGHT_PER_HTLC: u64 = 172;
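
// Editor's note: a worked-arithmetic sketch (not part of upstream; the helper name
// `example_commit_tx_fee_sat` is made up for illustration). Commitment transaction
// fees follow from the weights above as `feerate_per_kw * weight / 1000`, with each
// non-dust HTLC adding `COMMITMENT_TX_WEIGHT_PER_HTLC` weight units.
#[cfg(test)]
fn example_commit_tx_fee_sat(
	feerate_per_kw: u32, num_nondust_htlcs: usize, channel_type: &ChannelTypeFeatures,
) -> u64 {
	let weight = commitment_tx_base_weight(channel_type)
		+ num_nondust_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC;
	feerate_per_kw as u64 * weight / 1000
}

#[cfg(test)]
#[test]
fn example_commit_tx_fee_matches_hand_arithmetic() {
	// Non-anchor base weight 724, plus two HTLCs at 172 each: 1068 weight units,
	// which at 1000 sat/kW is 1068 sats.
	let channel_type = ChannelTypeFeatures::only_static_remote_key();
	assert_eq!(example_commit_tx_fee_sat(1000, 2, &channel_type), 1068);
}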

pub const ANCHOR_OUTPUT_VALUE_SATOSHI: u64 = 330;

/// The percentage of the channel value `holder_max_htlc_value_in_flight_msat` used to be set to,
/// before this was made configurable. The percentage was made configurable in LDK 0.0.107,
/// although LDK 0.0.104+ enabled serialization of channels with a different value set for
/// `holder_max_htlc_value_in_flight_msat`.
pub const MAX_IN_FLIGHT_PERCENT_LEGACY: u8 = 10;

/// Maximum `funding_satoshis` value according to the BOLT #2 specification, if
/// `option_support_large_channel` (aka wumbo channels) is not supported.
/// It's 2^24 - 1.
pub const MAX_FUNDING_SATOSHIS_NO_WUMBO: u64 = (1 << 24) - 1;

/// Total bitcoin supply in satoshis.
pub const TOTAL_BITCOIN_SUPPLY_SATOSHIS: u64 = 21_000_000 * 100_000_000;

/// The maximum network dust limit for standard script formats. This currently represents the
/// minimum output value for a P2SH output before Bitcoin Core 22 considers the entire
/// transaction non-standard and thus refuses to relay it.
/// We also use this as the maximum counterparty `dust_limit_satoshis` allowed, given many
/// implementations use this value for their dust limit today.
pub const MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS: u64 = 546;

/// The maximum channel dust limit we will accept from our counterparty.
pub const MAX_CHAN_DUST_LIMIT_SATOSHIS: u64 = MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS;

/// The dust limit is used for both the commitment transaction outputs as well as the closing
/// transactions. For cooperative closing transactions, we require segwit outputs, though accept
/// *any* segwit scripts, which are allowed to be up to 42 bytes in length.
/// In order to avoid having to concern ourselves with standardness during the closing process, we
/// simply require our counterparty to use a dust limit which will leave any segwit output
/// standard.
/// See <https://github.com/lightning/bolts/issues/905> for more details.
pub const MIN_CHAN_DUST_LIMIT_SATOSHIS: u64 = 354;

// Just a reasonable implementation-specific safe lower bound, higher than the dust limit.
pub const MIN_THEIR_CHAN_RESERVE_SATOSHIS: u64 = 1000;

/// Used to return a simple Error back to ChannelManager. Will get converted to a
/// msgs::ErrorAction::SendErrorMessage or msgs::ErrorAction::IgnoreError as appropriate with our
/// channel_id in ChannelManager.
pub(super) enum ChannelError {
	Ignore(String),
	Warn(String),
	Close(String),
}

impl fmt::Debug for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "Ignore : {}", e),
			&ChannelError::Warn(ref e) => write!(f, "Warn : {}", e),
			&ChannelError::Close(ref e) => write!(f, "Close : {}", e),
		}
	}
}

impl fmt::Display for ChannelError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&ChannelError::Ignore(ref e) => write!(f, "{}", e),
			&ChannelError::Warn(ref e) => write!(f, "{}", e),
			&ChannelError::Close(ref e) => write!(f, "{}", e),
		}
	}
}

pub(super) struct WithChannelContext<'a, L: Deref> where L::Target: Logger {
	pub logger: &'a L,
	pub peer_id: Option<PublicKey>,
	pub channel_id: Option<ChannelId>,
}

impl<'a, L: Deref> Logger for WithChannelContext<'a, L> where L::Target: Logger {
	fn log(&self, mut record: Record) {
		record.peer_id = self.peer_id;
		record.channel_id = self.channel_id;
		self.logger.log(record)
	}
}

impl<'a, 'b, L: Deref> WithChannelContext<'a, L>
where L::Target: Logger {
	pub(super) fn from<S: Deref>(logger: &'a L, context: &'b ChannelContext<S>) -> Self
	where S::Target: SignerProvider
	{
		WithChannelContext {
			logger,
			peer_id: Some(context.counterparty_node_id),
			channel_id: Some(context.channel_id),
		}
	}
}
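
// Editor's note: a brief usage sketch (not part of upstream). Wrapping a logger in
// `WithChannelContext` stamps every record it emits with the peer and channel ids,
// so individual log call sites don't have to thread those through manually.
#[cfg(test)]
#[test]
fn with_channel_context_usage_example() {
	use crate::util::test_utils::TestLogger;
	let inner = TestLogger::new();
	let inner_ref = &inner;
	let logger = WithChannelContext {
		logger: &inner_ref,
		peer_id: None,
		channel_id: Some(ChannelId([42; 32])),
	};
	log_info!(logger, "this record is auto-tagged with the channel id");
}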

macro_rules! secp_check {
	($res: expr, $err: expr) => {
		match $res {
			Ok(thing) => thing,
			Err(_) => return Err(ChannelError::Close($err)),
		}
	};
}

/// The "channel disabled" bit in channel_update must be set based on whether we are connected to
/// our counterparty or not. However, we don't want to announce updates right away to avoid
/// spamming the network with updates if the connection is flapping. Instead, we "stage" updates to
/// our channel_update message and track the current state here.
/// See implementation at [`super::channelmanager::ChannelManager::timer_tick_occurred`].
#[derive(Clone, Copy, PartialEq)]
pub(super) enum ChannelUpdateStatus {
	/// We've announced the channel as enabled and are connected to our peer.
	Enabled,
	/// Our channel is no longer live, but we haven't announced the channel as disabled yet.
	DisabledStaged(u8),
	/// Our channel is live again, but we haven't announced the channel as enabled yet.
	EnabledStaged(u8),
	/// We've announced the channel as disabled.
	Disabled,
}
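
// Editor's note: an illustrative sketch (not part of upstream, and not the actual
// `timer_tick_occurred` logic) of how the staged states above debounce a flapping
// connection: a tick counter advances while the channel stays in a staged state,
// and only once it reaches a threshold is the public announcement flipped.
#[cfg(test)]
#[allow(unused)]
fn example_advance_channel_update_status(
	status: ChannelUpdateStatus, ticks_until_flip: u8,
) -> ChannelUpdateStatus {
	match status {
		ChannelUpdateStatus::DisabledStaged(ticks) if ticks + 1 >= ticks_until_flip =>
			ChannelUpdateStatus::Disabled,
		ChannelUpdateStatus::DisabledStaged(ticks) => ChannelUpdateStatus::DisabledStaged(ticks + 1),
		ChannelUpdateStatus::EnabledStaged(ticks) if ticks + 1 >= ticks_until_flip =>
			ChannelUpdateStatus::Enabled,
		ChannelUpdateStatus::EnabledStaged(ticks) => ChannelUpdateStatus::EnabledStaged(ticks + 1),
		other => other,
	}
}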

/// We track when we sent an `AnnouncementSignatures` to our peer in a few states, described here.
#[derive(PartialEq)]
pub enum AnnouncementSigsState {
	/// We have not sent our peer an `AnnouncementSignatures` yet, or our peer disconnected since
	/// we sent the last `AnnouncementSignatures`.
	NotSent,
	/// We sent an `AnnouncementSignatures` to our peer since the last time our peer disconnected.
	/// This state never appears on disk - instead we write `NotSent`.
	MessageSent,
	/// We sent a `CommitmentSigned` after the last `AnnouncementSignatures` we sent. Because we
	/// only ever have a single `CommitmentSigned` pending at once, if we sent one after sending
	/// `AnnouncementSignatures` then we know the peer received our `AnnouncementSignatures` if
	/// they send back a `RevokeAndACK`.
	/// This state never appears on disk - instead we write `NotSent`.
	Committed,
	/// We received a `RevokeAndACK`, effectively ack-ing our `AnnouncementSignatures`, at this
	/// point we no longer need to re-send our `AnnouncementSignatures` again on reconnect.
	PeerReceived,
}

/// An enum indicating whether the local or remote side offered a given HTLC.
enum HTLCInitiator {
	LocalOffered,
	RemoteOffered,
}

/// A struct gathering stats on pending HTLCs, for either the inbound or outbound side.
struct HTLCStats {
	pending_htlcs: u32,
	pending_htlcs_value_msat: u64,
	on_counterparty_tx_dust_exposure_msat: u64,
	on_holder_tx_dust_exposure_msat: u64,
	holding_cell_msat: u64,
	on_holder_tx_holding_cell_htlcs_count: u32, // excluding dust HTLCs
}

/// A struct gathering stats on a commitment transaction, either local or remote.
struct CommitmentStats<'a> {
	tx: CommitmentTransaction, // the transaction info
	feerate_per_kw: u32, // the feerate included to build the transaction
	total_fee_sat: u64, // the total fee included in the transaction
	num_nondust_htlcs: usize, // the number of HTLC outputs (excluding dust HTLCs)
	htlcs_included: Vec<(HTLCOutputInCommitment, Option<&'a HTLCSource>)>, // the list of HTLCs (including dust HTLCs) which were not ignored when building the transaction
991         local_balance_msat: u64, // local balance before fees *not* considering dust limits
992         remote_balance_msat: u64, // remote balance before fees *not* considering dust limits
993         outbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful offered HTLCs since last commitment
994         inbound_htlc_preimages: Vec<PaymentPreimage>, // preimages for successful received HTLCs since last commitment
995 }
996
997 /// Used when calculating whether we or the remote can afford an additional HTLC.
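/// For example (a hedged sketch; the amount is illustrative):
/// ```ignore
/// let candidate = HTLCCandidate::new(10_000_000, HTLCInitiator::LocalOffered);
/// // `candidate` is then fed into the commitment-fee prediction helpers such as
/// // `next_local_commit_tx_fee_msat`.
/// ```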
998 struct HTLCCandidate {
999         amount_msat: u64,
1000         origin: HTLCInitiator,
1001 }
1002
1003 impl HTLCCandidate {
1004         fn new(amount_msat: u64, origin: HTLCInitiator) -> Self {
1005                 Self {
1006                         amount_msat,
1007                         origin,
1008                 }
1009         }
1010 }
1011
1012 /// A return value enum for `get_update_fulfill_htlc`. See `UpdateFulfillCommitFetch` variants
1013 /// for a description.
1014 enum UpdateFulfillFetch {
1015         NewClaim {
1016                 monitor_update: ChannelMonitorUpdate,
1017                 htlc_value_msat: u64,
1018                 msg: Option<msgs::UpdateFulfillHTLC>,
1019         },
1020         DuplicateClaim {},
1021 }
1022
1023 /// The return type of get_update_fulfill_htlc_and_commit.
1024 pub enum UpdateFulfillCommitFetch {
1025         /// Indicates the HTLC fulfill is new, and either generated an update_fulfill message, placed
1026         /// it in the holding cell, or re-generated the update_fulfill message after the same claim was
1027         /// previously placed in the holding cell (and has since been removed).
1028         NewClaim {
1029                 /// The ChannelMonitorUpdate which places the new payment preimage in the channel monitor
1030                 monitor_update: ChannelMonitorUpdate,
1031                 /// The value of the HTLC which was claimed, in msat.
1032                 htlc_value_msat: u64,
1033         },
1034         /// Indicates the HTLC fulfill is duplicative and already existed either in the holding cell
1035         /// or has been forgotten (presumably previously claimed).
1036         DuplicateClaim {},
1037 }
1038
1039 /// The return value of `monitor_updating_restored`
1040 pub(super) struct MonitorRestoreUpdates {
1041         pub raa: Option<msgs::RevokeAndACK>,
1042         pub commitment_update: Option<msgs::CommitmentUpdate>,
1043         pub order: RAACommitmentOrder,
1044         pub accepted_htlcs: Vec<(PendingHTLCInfo, u64)>,
1045         pub failed_htlcs: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1046         pub finalized_claimed_htlcs: Vec<HTLCSource>,
1047         pub funding_broadcastable: Option<Transaction>,
1048         pub channel_ready: Option<msgs::ChannelReady>,
1049         pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
1050 }
1051
1052 /// The return value of `signer_maybe_unblocked`
1053 #[allow(unused)]
1054 pub(super) struct SignerResumeUpdates {
1055         pub commitment_update: Option<msgs::CommitmentUpdate>,
1056         pub funding_signed: Option<msgs::FundingSigned>,
1057         pub channel_ready: Option<msgs::ChannelReady>,
1058 }
1059
1060 /// The return value of `channel_reestablish`
1061 pub(super) struct ReestablishResponses {
1062         pub channel_ready: Option<msgs::ChannelReady>,
1063         pub raa: Option<msgs::RevokeAndACK>,
1064         pub commitment_update: Option<msgs::CommitmentUpdate>,
1065         pub order: RAACommitmentOrder,
1066         pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
1067         pub shutdown_msg: Option<msgs::Shutdown>,
1068 }
1069
1070 /// The result of a shutdown that should be handled.
1071 #[must_use]
1072 pub(crate) struct ShutdownResult {
1073         pub(crate) closure_reason: ClosureReason,
1074         /// A channel monitor update to apply.
1075         pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>,
1076         /// A list of dropped outbound HTLCs that can safely be failed backwards immediately.
1077         pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
1078         /// An unbroadcasted batch funding transaction id. The closure of this channel should be
1079         /// propagated to the remainder of the batch.
1080         pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
1081         pub(crate) channel_id: ChannelId,
1082         pub(crate) user_channel_id: u128,
1083         pub(crate) channel_capacity_satoshis: u64,
1084         pub(crate) counterparty_node_id: PublicKey,
1085         pub(crate) unbroadcasted_funding_tx: Option<Transaction>,
1086         pub(crate) channel_funding_txo: Option<OutPoint>,
1087 }
1088
1089 /// If the majority of the channel's funds are on the fundee's side and the initiator holds only
1090 /// just enough funds to cover their reserve value, channels are at risk of getting "stuck".
1091 /// Because the initiator controls the feerate, if the fee then needs to increase, the initiator
1092 /// may have no spare balance, while the fundee is unable to send a payment as the resulting fee
1093 /// increase would dip the initiator below their reserve value. Thus, neither side can send a new
1094 /// HTLC and the channel becomes useless. To avoid this, before sending an HTLC when we are the
1095 /// initiator, we check that the feerate can increase by this multiple without hitting this case.
1096 /// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over
1097 /// the channel. Sadly, there isn't really a good number for this - if we expect to have no new
1098 /// HTLCs for days we may need this to suffice for feerate increases across days, but that may
1099 /// leave the channel less usable as we hold a bigger reserve.
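///
/// As a hedged sketch of how the buffer factors into the check (variable names here are
/// illustrative):
/// ```ignore
/// let buffered_feerate = feerate_per_kw as u64 * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
/// // Only add the HTLC if we could still afford the commitment fee at the buffered rate.
/// ```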
1100 #[cfg(any(fuzzing, test))]
1101 pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
1102 #[cfg(not(any(fuzzing, test)))]
1103 const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2;
1104
1105 /// If we fail to see a funding transaction confirmed on-chain within this many blocks after the
1106 /// channel creation on an inbound channel, we simply force-close and move on.
1107 /// This constant, 2016 blocks (roughly two weeks), is the value suggested in BOLT 2.
1108 pub(crate) const FUNDING_CONF_DEADLINE_BLOCKS: u32 = 2016;
1109
1110 /// In case of a concurrent update_add_htlc proposed by our counterparty, we might
1111 /// not have enough balance remaining to cover the on-chain cost of this new
1112 /// HTLC's weight. If this happens, our counterparty rejects our
1113 /// commitment_signed including this new HTLC, due to infringement of the channel
1114 /// reserve.
1115 /// To prevent this case, we compute our outbound update_fee with an HTLC buffer of
1116 /// size 2. However, if the number of concurrent update_add_htlcs is higher, this still
1117 /// leads to a channel force-close. Ultimately, this is an issue coming from the
1118 /// design of LN state machines, allowing asynchronous updates.
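///
/// As a hedged sketch, an outbound update_fee is budgeted roughly as follows (variable names
/// are illustrative; `commit_tx_fee_msat` is the helper used elsewhere in this file):
/// ```ignore
/// let fee_budget_msat = commit_tx_fee_msat(
///     new_feerate_per_kw,
///     pending_htlc_count + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize,
///     &channel_type,
/// );
/// ```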
1119 pub(crate) const CONCURRENT_INBOUND_HTLC_FEE_BUFFER: u32 = 2;
1120
1121 /// When a channel is opened, we check that the funding amount is enough to pay for relevant
1122 /// commitment transaction fees, with at least this many HTLCs present on the commitment
1123 /// transaction (not counting the value of the HTLCs themselves).
1124 pub(crate) const MIN_AFFORDABLE_HTLC_COUNT: usize = 4;
1125
1126 /// When a [`Channel`] has its [`ChannelConfig`] updated, its existing one is stashed for up to this
1127 /// number of ticks to allow forwarding HTLCs by nodes that have yet to receive the new
1128 /// ChannelUpdate prompted by the config update. This value was determined as follows:
1129 ///
1130 ///   * The expected interval between ticks (1 minute).
1131 ///   * The average convergence delay of updates across the network, i.e., ~300 seconds on
1132 ///     average for a node to see an update, as reported in <https://arxiv.org/pdf/2205.12737.pdf>.
1133 ///   * `EXPIRE_PREV_CONFIG_TICKS` = convergence_delay / tick_interval
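///   * Concretely: ~300 seconds of convergence delay / 60 seconds per tick = 5 ticks.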
1134 pub(crate) const EXPIRE_PREV_CONFIG_TICKS: usize = 5;
1135
1136 /// The number of ticks that may elapse while we're waiting for a response to a
1137 /// [`msgs::RevokeAndACK`] or [`msgs::ChannelReestablish`] message before we attempt to disconnect
1138 /// them.
1139 ///
1140 /// See [`ChannelContext::sent_message_awaiting_response`] for more information.
1141 pub(crate) const DISCONNECT_PEER_AWAITING_RESPONSE_TICKS: usize = 2;
1142
1143 /// The number of ticks that may elapse while we're waiting for an unfunded outbound/inbound channel
1144 /// to be promoted to a [`Channel`] since the unfunded channel was created. An unfunded channel
1145 /// exceeding this age limit will be force-closed and purged from memory.
1146 pub(crate) const UNFUNDED_CHANNEL_AGE_LIMIT_TICKS: usize = 60;
1147
1148 /// Number of blocks needed for an output from a coinbase transaction to be spendable.
1149 pub(crate) const COINBASE_MATURITY: u32 = 100;
1150
1151 struct PendingChannelMonitorUpdate {
1152         update: ChannelMonitorUpdate,
1153 }
1154
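// Serialize `PendingChannelMonitorUpdate` as a TLV stream, mapping TLV type 0 to the required
// `update` field.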
1155 impl_writeable_tlv_based!(PendingChannelMonitorUpdate, {
1156         (0, update, required),
1157 });
1158
1159 /// The `ChannelPhase` enum describes the current phase in life of a lightning channel with each of
1160 /// its variants containing an appropriate channel struct.
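///
/// As a hedged usage sketch (`phase` here is illustrative):
/// ```ignore
/// let chan_id = phase.context().channel_id();
/// ```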
1161 pub(super) enum ChannelPhase<SP: Deref> where SP::Target: SignerProvider {
1162         UnfundedOutboundV1(OutboundV1Channel<SP>),
1163         UnfundedInboundV1(InboundV1Channel<SP>),
1164         Funded(Channel<SP>),
1165 }
1166
1167 impl<'a, SP: Deref> ChannelPhase<SP> where
1168         SP::Target: SignerProvider,
1169         <SP::Target as SignerProvider>::EcdsaSigner: ChannelSigner,
1170 {
1171         pub fn context(&'a self) -> &'a ChannelContext<SP> {
1172                 match self {
1173                         ChannelPhase::Funded(chan) => &chan.context,
1174                         ChannelPhase::UnfundedOutboundV1(chan) => &chan.context,
1175                         ChannelPhase::UnfundedInboundV1(chan) => &chan.context,
1176                 }
1177         }
1178
1179         pub fn context_mut(&'a mut self) -> &'a mut ChannelContext<SP> {
1180                 match self {
1181                         ChannelPhase::Funded(ref mut chan) => &mut chan.context,
1182                         ChannelPhase::UnfundedOutboundV1(ref mut chan) => &mut chan.context,
1183                         ChannelPhase::UnfundedInboundV1(ref mut chan) => &mut chan.context,
1184                 }
1185         }
1186 }
1187
1188 /// Contains all state common to unfunded inbound/outbound channels.
1189 pub(super) struct UnfundedChannelContext {
1190         /// A counter tracking how many ticks have elapsed since this unfunded channel was
1191         /// created. If the peer has yet to respond once this counter reaches
1192         /// `UNFUNDED_CHANNEL_AGE_LIMIT_TICKS`, the channel will be force-closed and purged from memory.
1193         ///
1194         /// This is so that we don't keep channels around that haven't progressed to a funded state
1195         /// in a timely manner.
1196         unfunded_channel_age_ticks: usize,
1197 }
1198
1199 impl UnfundedChannelContext {
1200         /// Determines whether we should force-close and purge this unfunded channel from memory due to it
1201         /// having reached the unfunded channel age limit.
1202         ///
1203         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
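        /// A hedged usage sketch (the surrounding scaffolding is illustrative):
        /// ```ignore
        /// // Once per timer tick, prune unfunded channels that have idled too long:
        /// if unfunded_context.should_expire_unfunded_channel() {
        ///     // force-close the channel and drop it from memory
        /// }
        /// ```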
1204         pub fn should_expire_unfunded_channel(&mut self) -> bool {
1205                 self.unfunded_channel_age_ticks += 1;
1206                 self.unfunded_channel_age_ticks >= UNFUNDED_CHANNEL_AGE_LIMIT_TICKS
1207         }
1208 }
1209
1210 /// Contains everything about the channel, including its state and various flags.
1211 pub(super) struct ChannelContext<SP: Deref> where SP::Target: SignerProvider {
1212         config: LegacyChannelConfig,
1213
1214         // Track the previous `ChannelConfig` so that we can continue forwarding HTLCs that were
1215         // constructed using it. The second element in the tuple corresponds to the number of ticks that
1216         // have elapsed since the update occurred.
1217         prev_config: Option<(ChannelConfig, usize)>,
1218
1219         inbound_handshake_limits_override: Option<ChannelHandshakeLimits>,
1220
1221         user_id: u128,
1222
1223         /// The current channel ID.
1224         channel_id: ChannelId,
1225         /// The temporary channel ID used during channel setup. Value kept even after transitioning to a final channel ID.
1226         /// Will be `None` for channels created prior to 0.0.115.
1227         temporary_channel_id: Option<ChannelId>,
1228         channel_state: ChannelState,
1229
1230         // When we reach max(6 blocks, minimum_depth), we need to send an AnnouncementSignatures
1231         // message to our peer. However, we want to make sure they received it, or else rebroadcast
1232         // it when we next connect.
1233         // We do so here; see `AnnouncementSigsState` for more details on the state(s).
1234         // Note that a number of our tests were written prior to the behavior here which retransmits
1235         // AnnouncementSignatures until after an RAA completes, so the behavior is short-circuited in
1236         // many tests.
1237         #[cfg(any(test, feature = "_test_utils"))]
1238         pub(crate) announcement_sigs_state: AnnouncementSigsState,
1239         #[cfg(not(any(test, feature = "_test_utils")))]
1240         announcement_sigs_state: AnnouncementSigsState,
1241
1242         secp_ctx: Secp256k1<secp256k1::All>,
1243         channel_value_satoshis: u64,
1244
1245         latest_monitor_update_id: u64,
1246
1247         holder_signer: ChannelSignerType<SP>,
1248         shutdown_scriptpubkey: Option<ShutdownScript>,
1249         destination_script: ScriptBuf,
1250
1251         // Our commitment numbers start at 2^48-1 and count down, whereas the ones used in transaction
1252         // generation start at 0 and count up...this simplifies some parts of implementation at the
1253         // cost of others, but should really just be changed.
1254
1255         cur_holder_commitment_transaction_number: u64,
1256         cur_counterparty_commitment_transaction_number: u64,
1257         value_to_self_msat: u64, // Excluding all pending_htlcs, fees, and anchor outputs
1258         pending_inbound_htlcs: Vec<InboundHTLCOutput>,
1259         pending_outbound_htlcs: Vec<OutboundHTLCOutput>,
1260         holding_cell_htlc_updates: Vec<HTLCUpdateAwaitingACK>,
1261
1262         /// When resending CS/RAA messages on channel monitor restoration or on reconnect, we always
1263         /// need to ensure we resend them in the order we originally generated them. Note that because
1264         /// there can only ever be one in-flight CS and/or one in-flight RAA at any time, it is
1265         /// sufficient to simply set this to the opposite of any message we are generating as we
1266         /// generate it, i.e. when we generate a CS, we set this to `RevokeAndACKFirst` as, if there
1267         /// is a pending in-flight RAA to resend, it will have been the first thing we generated, and
1268         /// thus we should send it first.
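        /// As a hedged sketch, generating a new CS is thus paired with:
        /// ```ignore
        /// self.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
        /// ```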
1269         resend_order: RAACommitmentOrder,
1270
1271         monitor_pending_channel_ready: bool,
1272         monitor_pending_revoke_and_ack: bool,
1273         monitor_pending_commitment_signed: bool,
1274
1275         // TODO: If a channel is drop'd, we don't know whether the `ChannelMonitor` is ultimately
1276         // responsible for some of the HTLCs here or not - we don't know whether the update in question
1277         // completed or not. We currently ignore these fields entirely when force-closing a channel,
1278         // but need to handle this somehow or we run the risk of losing HTLCs!
1279         monitor_pending_forwards: Vec<(PendingHTLCInfo, u64)>,
1280         monitor_pending_failures: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
1281         monitor_pending_finalized_fulfills: Vec<HTLCSource>,
1282
1283         /// If we went to send a commitment update (i.e. some messages then [`msgs::CommitmentSigned`])
1284         /// but our signer (initially) refused to give us a signature, we should retry at some point in
1285         /// the future when the signer indicates it may have a signature for us.
1286         ///
1287         /// This flag is set in such a case. Note that we don't need to persist this as we'll end up
1288         /// setting it again as a side-effect of [`Channel::channel_reestablish`].
1289         signer_pending_commitment_update: bool,
1290         /// Similar to [`Self::signer_pending_commitment_update`] but we're waiting to send either a
1291         /// [`msgs::FundingCreated`] or [`msgs::FundingSigned`] depending on if this channel is
1292         /// outbound or inbound.
1293         signer_pending_funding: bool,
1294
1295         // pending_update_fee is filled when sending and receiving update_fee.
1296         //
1297         // Because it follows the same commitment flow as HTLCs, `FeeUpdateState` is either `Outbound`
1298         // or matches a subset of the `InboundHTLCOutput` variants. It is then updated/used when
1299         // generating new commitment transactions with exactly the same criteria as inbound/outbound
1300         // HTLCs with similar state.
1301         pending_update_fee: Option<(u32, FeeUpdateState)>,
1302         // If a `send_update_fee()` call is made with ChannelState::AwaitingRemoteRevoke set, we place
1303         // it here instead of `pending_update_fee` in the same way as we place outbound HTLC updates in
1304         // `holding_cell_htlc_updates` instead of `pending_outbound_htlcs`. It is released into
1305         // `pending_update_fee` with the same criteria as outbound HTLC updates but can be updated by
1306         // further `send_update_fee` calls, dropping the previous holding cell update entirely.
1307         holding_cell_update_fee: Option<u32>,
1308         next_holder_htlc_id: u64,
1309         next_counterparty_htlc_id: u64,
1310         feerate_per_kw: u32,
1311
1312         /// The timestamp set on our latest `channel_update` message for this channel. It is updated
1313         /// when the channel is updated in ways which may impact the `channel_update` message or when a
1314         /// new block is received, ensuring it's always at least moderately close to the current real
1315         /// time.
1316         update_time_counter: u32,
1317
1318         #[cfg(debug_assertions)]
1319         /// Max to_local and to_remote outputs in a locally-generated commitment transaction
1320         holder_max_commitment_tx_output: Mutex<(u64, u64)>,
1321         #[cfg(debug_assertions)]
1322         /// Max to_local and to_remote outputs in a remote-generated commitment transaction
1323         counterparty_max_commitment_tx_output: Mutex<(u64, u64)>,
1324
1325         last_sent_closing_fee: Option<(u64, Signature)>, // (fee, holder_sig)
1326         target_closing_feerate_sats_per_kw: Option<u32>,
1327
1328         /// If our counterparty sent us a closing_signed while we were waiting for a `ChannelMonitor`
1329         /// update, we need to delay processing it until later. We do that here by simply storing the
1330         /// closing_signed message and handling it in `maybe_propose_closing_signed`.
1331         pending_counterparty_closing_signed: Option<msgs::ClosingSigned>,
1332
1333         /// The minimum and maximum absolute fee, in satoshis, we are willing to place on the closing
1334         /// transaction. These are set once we reach `closing_negotiation_ready`.
1335         #[cfg(test)]
1336         pub(crate) closing_fee_limits: Option<(u64, u64)>,
1337         #[cfg(not(test))]
1338         closing_fee_limits: Option<(u64, u64)>,
1339
1340         /// If we remove an HTLC (or fee update), commit, and receive our counterparty's
1341         /// `revoke_and_ack`, we remove all knowledge of said HTLC (or fee update). However, the latest
1342         /// local commitment transaction that we can broadcast still contains the HTLC (or old fee)
1343         /// until we receive a further `commitment_signed`. Thus we are not eligible for initiating the
1344         /// `closing_signed` negotiation if we're expecting a counterparty `commitment_signed`.
1345         ///
1346         /// To ensure we don't send a `closing_signed` too early, we track this state here, waiting
1347         /// until we see a `commitment_signed` before doing so.
1348         ///
1349         /// We don't bother to persist this - we anticipate this state won't last longer than a few
1350         /// milliseconds, so any accidental force-closes here should be exceedingly rare.
1351         expecting_peer_commitment_signed: bool,
1352
1353         /// The hash of the block in which the funding transaction was included.
1354         funding_tx_confirmed_in: Option<BlockHash>,
1355         funding_tx_confirmation_height: u32,
1356         short_channel_id: Option<u64>,
1357         /// Either the height at which this channel was created or the height at which it was last
1358         /// serialized if it was serialized by versions prior to 0.0.103.
1359         /// We use this to close if funding is never broadcasted.
1360         channel_creation_height: u32,
1361
1362         counterparty_dust_limit_satoshis: u64,
1363
1364         #[cfg(test)]
1365         pub(super) holder_dust_limit_satoshis: u64,
1366         #[cfg(not(test))]
1367         holder_dust_limit_satoshis: u64,
1368
1369         #[cfg(test)]
1370         pub(super) counterparty_max_htlc_value_in_flight_msat: u64,
1371         #[cfg(not(test))]
1372         counterparty_max_htlc_value_in_flight_msat: u64,
1373
1374         #[cfg(test)]
1375         pub(super) holder_max_htlc_value_in_flight_msat: u64,
1376         #[cfg(not(test))]
1377         holder_max_htlc_value_in_flight_msat: u64,
1378
1379         /// The minimum channel reserve we're required to maintain, as selected by our counterparty.
1380         counterparty_selected_channel_reserve_satoshis: Option<u64>,
1381
1382         #[cfg(test)]
1383         pub(super) holder_selected_channel_reserve_satoshis: u64,
1384         #[cfg(not(test))]
1385         holder_selected_channel_reserve_satoshis: u64,
1386
1387         counterparty_htlc_minimum_msat: u64,
1388         holder_htlc_minimum_msat: u64,
1389         #[cfg(test)]
1390         pub counterparty_max_accepted_htlcs: u16,
1391         #[cfg(not(test))]
1392         counterparty_max_accepted_htlcs: u16,
1393         holder_max_accepted_htlcs: u16,
1394         minimum_depth: Option<u32>,
1395
1396         counterparty_forwarding_info: Option<CounterpartyForwardingInfo>,
1397
1398         pub(crate) channel_transaction_parameters: ChannelTransactionParameters,
1399         funding_transaction: Option<Transaction>,
1400         is_batch_funding: Option<()>,
1401
1402         counterparty_cur_commitment_point: Option<PublicKey>,
1403         counterparty_prev_commitment_point: Option<PublicKey>,
1404         counterparty_node_id: PublicKey,
1405
1406         counterparty_shutdown_scriptpubkey: Option<ScriptBuf>,
1407
1408         commitment_secrets: CounterpartyCommitmentSecrets,
1409
1410         channel_update_status: ChannelUpdateStatus,
1411         /// Once we reach `closing_negotiation_ready`, we set this, indicating that if closing_signed
1412         /// does not complete within a single timer tick (one minute), we should force-close the channel.
1413         /// This prevents us from keeping unusable channels around forever if our counterparty wishes
1414         /// to DoS us.
1415         /// Note that this field is reset to false on deserialization to give us a chance to connect to
1416         /// our peer and start the closing_signed negotiation fresh.
1417         closing_signed_in_flight: bool,
1418
1419         /// Our counterparty's channel_announcement signatures provided in announcement_signatures.
1420         /// This can be used to rebroadcast the channel_announcement message later.
1421         announcement_sigs: Option<(Signature, Signature)>,
1422
1423         // We save these values so we can make sure `next_local_commit_tx_fee_msat` and
1424         // `next_remote_commit_tx_fee_msat` properly predict what the next commitment transaction fee will
1425         // be, by comparing the cached values to the fee of the transaction generated by
1426         // `build_commitment_transaction`.
1427         #[cfg(any(test, fuzzing))]
1428         next_local_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1429         #[cfg(any(test, fuzzing))]
1430         next_remote_commitment_tx_fee_info_cached: Mutex<Option<CommitmentTxInfoCached>>,
1431
1432         /// lnd has a long-standing bug where, upon reconnection, if the channel is not yet confirmed
1433         /// they will not send a channel_reestablish until the channel locks in. Then, they will send a
1434         /// channel_ready *before* sending the channel_reestablish (which is clearly a violation of
1435         /// the BOLT specs). We copy c-lightning's workaround here and simply store the channel_ready
1436         /// message until we receive a channel_reestablish.
1437         ///
1438         /// See also <https://github.com/lightningnetwork/lnd/issues/4006>
1439         pub workaround_lnd_bug_4006: Option<msgs::ChannelReady>,
1440
1441         /// An option set when we wish to track how many ticks have elapsed while waiting for a response
1442         /// from our counterparty after sending a message. If the peer has yet to respond after reaching
1443         /// `DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`, a reconnection should be attempted to try to
1444         /// unblock the state machine.
1445         ///
1446         /// This behavior is mostly motivated by a lnd bug in which we don't receive a message we expect
1447         /// to in a timely manner, which may lead to channels becoming unusable and/or force-closed. An
1448         /// example of such can be found at <https://github.com/lightningnetwork/lnd/issues/7682>.
1449         ///
1450         /// This is currently only used when waiting for a [`msgs::ChannelReestablish`] or
1451         /// [`msgs::RevokeAndACK`] message from the counterparty.
1452         sent_message_awaiting_response: Option<usize>,
1453
1454         #[cfg(any(test, fuzzing))]
1455         // When we receive an HTLC fulfill on an outbound path, we may immediately fulfill the
1456         // corresponding HTLC on the inbound path. If, then, the outbound path channel is
1457         // disconnected and reconnected (before we've exchanged commitment_signed and revoke_and_ack
1458         // messages), they may re-broadcast their update_fulfill_htlc, causing a duplicate claim. This
1459         // is fine, but as a sanity check when we fail to generate the second claim, we check here
1460         // that the original was a claim, and that we aren't now trying to fulfill a failed HTLC.
1461         historical_inbound_htlc_fulfills: HashSet<u64>,
1462
1463         /// This channel's type, as negotiated during channel open
1464         channel_type: ChannelTypeFeatures,
1465
1466         // Our counterparty can offer us SCID aliases which they will map to this channel when routing
1467         // outbound payments. These can be used in invoice route hints to avoid explicitly revealing
1468         // the channel's funding UTXO.
1469         //
1470         // We also use this when sending our peer a channel_update that isn't to be broadcasted
1471         // publicly - allowing them to re-use their map of SCID -> channel for channel_update ->
1472         // associated channel mapping.
1473         //
1474         // We only bother storing the most recent SCID alias at any time, though our counterparty has
1475         // to store all of them.
1476         latest_inbound_scid_alias: Option<u64>,
1477
1478         // We always offer our counterparty a static SCID alias, which we recognize as for this channel
1479         // if we see it in HTLC forwarding instructions. We don't bother rotating the alias given we
1480         // don't currently support node id aliases and eventually privacy should be provided with
1481         // blinded paths instead of simple scid+node_id aliases.
1482         outbound_scid_alias: u64,
1483
1484         // We track whether we already emitted a `ChannelPending` event.
1485         channel_pending_event_emitted: bool,
1486
1487         // We track whether we already emitted a `ChannelReady` event.
1488         channel_ready_event_emitted: bool,
1489
1490         /// Set to `Some(())` if we initiated shutting down the channel.
1491         local_initiated_shutdown: Option<()>,
1492
1493         /// The unique identifier used to re-derive the private key material for the channel through
1494         /// [`SignerProvider::derive_channel_signer`].
1495         #[cfg(not(test))]
1496         channel_keys_id: [u8; 32],
1497         #[cfg(test)]
1498         pub channel_keys_id: [u8; 32],
1499
1500         /// If we can't release a [`ChannelMonitorUpdate`] until some external action completes, we
1501         /// store it here and only release it to the `ChannelManager` once it asks for it.
1502         blocked_monitor_updates: Vec<PendingChannelMonitorUpdate>,
1503 }
1504
1505 impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider  {
1506         fn new_for_inbound_channel<'a, ES: Deref, F: Deref, L: Deref>(
1507                 fee_estimator: &'a LowerBoundedFeeEstimator<F>,
1508                 entropy_source: &'a ES,
1509                 signer_provider: &'a SP,
1510                 counterparty_node_id: PublicKey,
1511                 their_features: &'a InitFeatures,
1512                 user_id: u128,
1513                 config: &'a UserConfig,
1514                 current_chain_height: u32,
1515                 logger: &'a L,
1516                 is_0conf: bool,
1517                 our_funding_satoshis: u64,
1518                 counterparty_pubkeys: ChannelPublicKeys,
1519                 channel_type: ChannelTypeFeatures,
1520                 holder_selected_channel_reserve_satoshis: u64,
1521                 msg_channel_reserve_satoshis: u64,
1522                 msg_push_msat: u64,
1523                 open_channel_fields: msgs::CommonOpenChannelFields,
1524         ) -> Result<ChannelContext<SP>, ChannelError>
1525                 where
1526                         ES::Target: EntropySource,
1527                         F::Target: FeeEstimator,
1528                         L::Target: Logger,
1529                         SP::Target: SignerProvider,
1530         {
1531                 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(open_channel_fields.temporary_channel_id));
1532                 let announced_channel = (open_channel_fields.channel_flags & 1) == 1;
1533
1534                 let channel_value_satoshis = our_funding_satoshis.saturating_add(open_channel_fields.funding_satoshis);
1535
1536                 let channel_keys_id = signer_provider.generate_channel_keys_id(true, channel_value_satoshis, user_id);
1537                 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
1538                 let pubkeys = holder_signer.pubkeys().clone();
1539
1540                 if config.channel_handshake_config.our_to_self_delay < BREAKDOWN_TIMEOUT {
1541                         return Err(ChannelError::Close(format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk. It must be no less than {}", config.channel_handshake_config.our_to_self_delay, BREAKDOWN_TIMEOUT)));
1542                 }
1543
1544                 // Check sanity of message fields:
1545                 if channel_value_satoshis > config.channel_handshake_limits.max_funding_satoshis {
1546                         return Err(ChannelError::Close(format!(
1547                                 "Per our config, funding must be at most {}. It was {}. Peer contribution: {}. Our contribution: {}",
1548                                 config.channel_handshake_limits.max_funding_satoshis, channel_value_satoshis,
1549                                 open_channel_fields.funding_satoshis, our_funding_satoshis)));
1550                 }
1551                 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
1552                         return Err(ChannelError::Close(format!("Funding must be smaller than the total bitcoin supply. It was {}", channel_value_satoshis)));
1553                 }
1554                 if msg_channel_reserve_satoshis > channel_value_satoshis {
1555                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must be no greater than channel_value_satoshis: {}", msg_channel_reserve_satoshis, channel_value_satoshis)));
1556                 }
1557                 let full_channel_value_msat = (channel_value_satoshis - msg_channel_reserve_satoshis) * 1000;
1558                 if msg_push_msat > full_channel_value_msat {
1559                         return Err(ChannelError::Close(format!("push_msat {} was larger than channel amount minus reserve ({})", msg_push_msat, full_channel_value_msat)));
1560                 }
1561                 if open_channel_fields.dust_limit_satoshis > channel_value_satoshis {
1562                         return Err(ChannelError::Close(format!("dust_limit_satoshis {} was larger than channel_value_satoshis {}. Peer never wants payout outputs?", open_channel_fields.dust_limit_satoshis, channel_value_satoshis)));
1563                 }
1564                 if open_channel_fields.htlc_minimum_msat >= full_channel_value_msat {
1565                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) was larger than full channel value ({})", open_channel_fields.htlc_minimum_msat, full_channel_value_msat)));
1566                 }
1567                 Channel::<SP>::check_remote_fee(&channel_type, fee_estimator, open_channel_fields.commitment_feerate_sat_per_1000_weight, None, &&logger)?;
1568
1569                 let max_counterparty_selected_contest_delay = u16::min(config.channel_handshake_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
1570                 if open_channel_fields.to_self_delay > max_counterparty_selected_contest_delay {
1571                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_counterparty_selected_contest_delay, open_channel_fields.to_self_delay)));
1572                 }
1573                 if open_channel_fields.max_accepted_htlcs < 1 {
1574                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
1575                 }
1576                 if open_channel_fields.max_accepted_htlcs > MAX_HTLCS {
1577                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", open_channel_fields.max_accepted_htlcs, MAX_HTLCS)));
1578                 }
1579
1580                 // Now check against optional parameters as set by config...
1581                 if channel_value_satoshis < config.channel_handshake_limits.min_funding_satoshis {
1582                         return Err(ChannelError::Close(format!("Funding satoshis ({}) is less than the user specified limit ({})", channel_value_satoshis, config.channel_handshake_limits.min_funding_satoshis)));
1583                 }
1584                 if open_channel_fields.htlc_minimum_msat > config.channel_handshake_limits.max_htlc_minimum_msat {
1585                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", open_channel_fields.htlc_minimum_msat, config.channel_handshake_limits.max_htlc_minimum_msat)));
1586                 }
1587                 if open_channel_fields.max_htlc_value_in_flight_msat < config.channel_handshake_limits.min_max_htlc_value_in_flight_msat {
1588                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", open_channel_fields.max_htlc_value_in_flight_msat, config.channel_handshake_limits.min_max_htlc_value_in_flight_msat)));
1589                 }
1590                 if msg_channel_reserve_satoshis > config.channel_handshake_limits.max_channel_reserve_satoshis {
1591                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg_channel_reserve_satoshis, config.channel_handshake_limits.max_channel_reserve_satoshis)));
1592                 }
1593                 if open_channel_fields.max_accepted_htlcs < config.channel_handshake_limits.min_max_accepted_htlcs {
1594                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", open_channel_fields.max_accepted_htlcs, config.channel_handshake_limits.min_max_accepted_htlcs)));
1595                 }
1596                 if open_channel_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1597                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1598                 }
1599                 if open_channel_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
1600                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", open_channel_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
1601                 }
1602
1603                 // Convert things into internal flags and prep our state:
1604
1605                 if config.channel_handshake_limits.force_announced_channel_preference {
1606                         if config.channel_handshake_config.announced_channel != announced_channel {
1607                                 return Err(ChannelError::Close("Peer tried to open channel but their announcement preference is different from ours".to_owned()));
1608                         }
1609                 }
1610
1611                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1612                         // Protocol-level safety check in place; this should never happen because of
1613                         // `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
1614                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({}). dust_limit_satoshis is ({}).", holder_selected_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
1615                 }
1616                 if holder_selected_channel_reserve_satoshis * 1000 >= full_channel_value_msat {
1617                         return Err(ChannelError::Close(format!("Suitable channel reserve not found. remote_channel_reserve was ({})msats. Channel value is ({} - {})msats.", holder_selected_channel_reserve_satoshis * 1000, full_channel_value_msat, msg_push_msat)));
1618                 }
1619                 if msg_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
1620                         log_debug!(logger, "channel_reserve_satoshis ({}) is smaller than our dust limit ({}). We can broadcast stale states without any risk, implying this channel is very insecure for our counterparty.",
1621                                 msg_channel_reserve_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS);
1622                 }
1623                 if holder_selected_channel_reserve_satoshis < open_channel_fields.dust_limit_satoshis {
1624                         return Err(ChannelError::Close(format!("Dust limit ({}) too high for the channel reserve we require the remote to keep ({})", open_channel_fields.dust_limit_satoshis, holder_selected_channel_reserve_satoshis)));
1625                 }
1626
1627                 // check if the funder's amount for the initial commitment tx is sufficient
1628                 // for full fee payment plus a few HTLCs to ensure the channel will be useful.
1629                 let anchor_outputs_value = if channel_type.supports_anchors_zero_fee_htlc_tx() {
1630                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2
1631                 } else {
1632                         0
1633                 };
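                // As a hedged worked example (BOLT 3 weights, non-anchor case): at 2500 sat/kW,
                // commit_tx_fee_msat(2500, MIN_AFFORDABLE_HTLC_COUNT, ..) covers 724 + 4 * 172 =
                // 1412 weight units, i.e. roughly 3530 sats of fee.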
1634                 let funders_amount_msat = open_channel_fields.funding_satoshis * 1000 - msg_push_msat;
1635                 let commitment_tx_fee = commit_tx_fee_msat(open_channel_fields.commitment_feerate_sat_per_1000_weight, MIN_AFFORDABLE_HTLC_COUNT, &channel_type) / 1000;
1636                 if (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value) < commitment_tx_fee {
1637                         return Err(ChannelError::Close(format!("Funding amount ({} sats) can't even pay the fee for the initial commitment transaction ({} sats).", (funders_amount_msat / 1000).saturating_sub(anchor_outputs_value), commitment_tx_fee)));
1638                 }
1639
1640                 let to_remote_satoshis = funders_amount_msat / 1000 - commitment_tx_fee - anchor_outputs_value;
1641                 // While it's reasonable for us to not meet the channel reserve initially (if they don't
1642                 // want to push much to us), our counterparty should always have more than our reserve.
1643                 if to_remote_satoshis < holder_selected_channel_reserve_satoshis {
1644                         return Err(ChannelError::Close("Insufficient funding amount for initial reserve".to_owned()));
1645                 }
1646
1647                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
1648                         match &open_channel_fields.shutdown_scriptpubkey {
1649                                 &Some(ref script) => {
1650                                         // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
1651                                         if script.len() == 0 {
1652                                                 None
1653                                         } else {
1654                                                 if !script::is_bolt2_compliant(&script, their_features) {
1655                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)))
1656                                                 }
1657                                                 Some(script.clone())
1658                                         }
1659                                 },
1660                                 // Peer is signaling upfront_shutdown but didn't opt out via the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel
1661                                 &None => {
1662                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but didn't provide a script. Use a 0-length script to opt out".to_owned()));
1663                                 }
1664                         }
1665                 } else { None };
1666
1667                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
1668                         match signer_provider.get_shutdown_scriptpubkey() {
1669                                 Ok(scriptpubkey) => Some(scriptpubkey),
1670                                 Err(_) => return Err(ChannelError::Close("Failed to get upfront shutdown scriptpubkey".to_owned())),
1671                         }
1672                 } else { None };
1673
1674                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
1675                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
1676                                 return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
1677                         }
1678                 }
1679
1680                 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
1681                         Ok(script) => script,
1682                         Err(_) => return Err(ChannelError::Close("Failed to get destination script".to_owned())),
1683                 };
1684
1685                 let mut secp_ctx = Secp256k1::new();
1686                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
1687
1688                 let minimum_depth = if is_0conf {
1689                         Some(0)
1690                 } else {
1691                         Some(cmp::max(config.channel_handshake_config.minimum_depth, 1))
1692                 };
1693
1694                 let value_to_self_msat = our_funding_satoshis * 1000 + msg_push_msat;
1695
1696                 // TODO(dual_funding): Checks for `funding_feerate_sat_per_1000_weight`?
1697
1698                 let channel_context = ChannelContext {
1699                         user_id,
1700
1701                         config: LegacyChannelConfig {
1702                                 options: config.channel_config.clone(),
1703                                 announced_channel,
1704                                 commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
1705                         },
1706
1707                         prev_config: None,
1708
1709                         inbound_handshake_limits_override: None,
1710
1711                         temporary_channel_id: Some(open_channel_fields.temporary_channel_id),
1712                         channel_id: open_channel_fields.temporary_channel_id,
1713                         channel_state: ChannelState::NegotiatingFunding(
1714                                 NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
1715                         ),
1716                         announcement_sigs_state: AnnouncementSigsState::NotSent,
1717                         secp_ctx,
1718
1719                         latest_monitor_update_id: 0,
1720
1721                         holder_signer: ChannelSignerType::Ecdsa(holder_signer),
1722                         shutdown_scriptpubkey,
1723                         destination_script,
1724
1725                         cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1726                         cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
1727                         value_to_self_msat,
1728
1729                         pending_inbound_htlcs: Vec::new(),
1730                         pending_outbound_htlcs: Vec::new(),
1731                         holding_cell_htlc_updates: Vec::new(),
1732                         pending_update_fee: None,
1733                         holding_cell_update_fee: None,
1734                         next_holder_htlc_id: 0,
1735                         next_counterparty_htlc_id: 0,
1736                         update_time_counter: 1,
1737
1738                         resend_order: RAACommitmentOrder::CommitmentFirst,
1739
1740                         monitor_pending_channel_ready: false,
1741                         monitor_pending_revoke_and_ack: false,
1742                         monitor_pending_commitment_signed: false,
1743                         monitor_pending_forwards: Vec::new(),
1744                         monitor_pending_failures: Vec::new(),
1745                         monitor_pending_finalized_fulfills: Vec::new(),
1746
1747                         signer_pending_commitment_update: false,
1748                         signer_pending_funding: false,
1749
1751                         #[cfg(debug_assertions)]
1752                         holder_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1753                         #[cfg(debug_assertions)]
1754                         counterparty_max_commitment_tx_output: Mutex::new((value_to_self_msat, (channel_value_satoshis * 1000 - msg_push_msat).saturating_sub(value_to_self_msat))),
1755
1756                         last_sent_closing_fee: None,
1757                         pending_counterparty_closing_signed: None,
1758                         expecting_peer_commitment_signed: false,
1759                         closing_fee_limits: None,
1760                         target_closing_feerate_sats_per_kw: None,
1761
1762                         funding_tx_confirmed_in: None,
1763                         funding_tx_confirmation_height: 0,
1764                         short_channel_id: None,
1765                         channel_creation_height: current_chain_height,
1766
1767                         feerate_per_kw: open_channel_fields.commitment_feerate_sat_per_1000_weight,
1768                         channel_value_satoshis,
1769                         counterparty_dust_limit_satoshis: open_channel_fields.dust_limit_satoshis,
1770                         holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
1771                         counterparty_max_htlc_value_in_flight_msat: cmp::min(open_channel_fields.max_htlc_value_in_flight_msat, channel_value_satoshis * 1000),
1772                         holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
1773                         counterparty_selected_channel_reserve_satoshis: Some(msg_channel_reserve_satoshis),
1774                         holder_selected_channel_reserve_satoshis,
1775                         counterparty_htlc_minimum_msat: open_channel_fields.htlc_minimum_msat,
1776                         holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
1777                         counterparty_max_accepted_htlcs: open_channel_fields.max_accepted_htlcs,
1778                         holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
1779                         minimum_depth,
1780
1781                         counterparty_forwarding_info: None,
1782
1783                         channel_transaction_parameters: ChannelTransactionParameters {
1784                                 holder_pubkeys: pubkeys,
1785                                 holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
1786                                 is_outbound_from_holder: false,
1787                                 counterparty_parameters: Some(CounterpartyChannelTransactionParameters {
1788                                         selected_contest_delay: open_channel_fields.to_self_delay,
1789                                         pubkeys: counterparty_pubkeys,
1790                                 }),
1791                                 funding_outpoint: None,
1792                                 channel_type_features: channel_type.clone()
1793                         },
1794                         funding_transaction: None,
1795                         is_batch_funding: None,
1796
1797                         counterparty_cur_commitment_point: Some(open_channel_fields.first_per_commitment_point),
1798                         counterparty_prev_commitment_point: None,
1799                         counterparty_node_id,
1800
1801                         counterparty_shutdown_scriptpubkey,
1802
1803                         commitment_secrets: CounterpartyCommitmentSecrets::new(),
1804
1805                         channel_update_status: ChannelUpdateStatus::Enabled,
1806                         closing_signed_in_flight: false,
1807
1808                         announcement_sigs: None,
1809
1810                         #[cfg(any(test, fuzzing))]
1811                         next_local_commitment_tx_fee_info_cached: Mutex::new(None),
1812                         #[cfg(any(test, fuzzing))]
1813                         next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
1814
1815                         workaround_lnd_bug_4006: None,
1816                         sent_message_awaiting_response: None,
1817
1818                         latest_inbound_scid_alias: None,
1819                         outbound_scid_alias: 0,
1820
1821                         channel_pending_event_emitted: false,
1822                         channel_ready_event_emitted: false,
1823
1824                         #[cfg(any(test, fuzzing))]
1825                         historical_inbound_htlc_fulfills: new_hash_set(),
1826
1827                         channel_type,
1828                         channel_keys_id,
1829
1830                         local_initiated_shutdown: None,
1831
1832                         blocked_monitor_updates: Vec::new(),
1833                 };
1834
1835                 Ok(channel_context)
1836         }
1837
1838         /// Allowed in any state (including after shutdown)
1839         pub fn get_update_time_counter(&self) -> u32 {
1840                 self.update_time_counter
1841         }
1842
1843         pub fn get_latest_monitor_update_id(&self) -> u64 {
1844                 self.latest_monitor_update_id
1845         }
1846
1847         pub fn should_announce(&self) -> bool {
1848                 self.config.announced_channel
1849         }
1850
1851         pub fn is_outbound(&self) -> bool {
1852                 self.channel_transaction_parameters.is_outbound_from_holder
1853         }
1854
1855         /// Gets the fee we'd want to charge for adding an HTLC output to this Channel
1856         /// Allowed in any state (including after shutdown)
1857         pub fn get_outbound_forwarding_fee_base_msat(&self) -> u32 {
1858                 self.config.options.forwarding_fee_base_msat
1859         }
1860
1861         /// Returns true if we've ever received a message from the remote end for this Channel
1862         pub fn have_received_message(&self) -> bool {
1863                 self.channel_state > ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT)
1864         }
1865
1866         /// Returns true if this channel is fully established and not known to be closing.
1867         /// Allowed in any state (including after shutdown)
1868         pub fn is_usable(&self) -> bool {
1869                 matches!(self.channel_state, ChannelState::ChannelReady(_)) &&
1870                         !self.channel_state.is_local_shutdown_sent() &&
1871                         !self.channel_state.is_remote_shutdown_sent() &&
1872                         !self.monitor_pending_channel_ready
1873         }
1874
1875         /// Returns the state of the channel as it progresses through the various stages of shutdown.
1876         pub fn shutdown_state(&self) -> ChannelShutdownState {
1877                 match self.channel_state {
1878                         ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_) =>
1879                                 if self.channel_state.is_local_shutdown_sent() && !self.channel_state.is_remote_shutdown_sent() {
1880                                         ChannelShutdownState::ShutdownInitiated
1881                                 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && !self.closing_negotiation_ready() {
1882                                         ChannelShutdownState::ResolvingHTLCs
1883                                 } else if (self.channel_state.is_local_shutdown_sent() || self.channel_state.is_remote_shutdown_sent()) && self.closing_negotiation_ready() {
1884                                         ChannelShutdownState::NegotiatingClosingFee
1885                                 } else {
1886                                         ChannelShutdownState::NotShuttingDown
1887                                 },
1888                         ChannelState::ShutdownComplete => ChannelShutdownState::ShutdownComplete,
1889                         _ => ChannelShutdownState::NotShuttingDown,
1890                 }
1891         }
1892
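        /// Returns true once both sides have sent `shutdown` and all pending HTLCs and fee updates
        /// have been resolved, i.e. `closing_signed` negotiation may begin.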
1893         fn closing_negotiation_ready(&self) -> bool {
1894                 let is_ready_to_close = match self.channel_state {
1895                         ChannelState::AwaitingChannelReady(flags) =>
1896                                 flags & FundedStateFlags::ALL == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1897                         ChannelState::ChannelReady(flags) =>
1898                                 flags == FundedStateFlags::LOCAL_SHUTDOWN_SENT | FundedStateFlags::REMOTE_SHUTDOWN_SENT,
1899                         _ => false,
1900                 };
1901                 self.pending_inbound_htlcs.is_empty() &&
1902                         self.pending_outbound_htlcs.is_empty() &&
1903                         self.pending_update_fee.is_none() &&
1904                         is_ready_to_close
1905         }
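        // A minimal illustrative sketch (hypothetical, not part of this file's API):
        // closing-fee negotiation requires both sides' shutdown messages plus a fully
        // quiescent channel (no pending HTLCs or fee update), i.e. "both shutdown flags
        // set and nothing in flight".
        #[cfg(test)]
        #[allow(dead_code)]
        fn closing_negotiation_ready_sketch() {
                // Stand-ins for FundedStateFlags::LOCAL_SHUTDOWN_SENT / REMOTE_SHUTDOWN_SENT.
                const LOCAL_SHUTDOWN_SENT: u8 = 1 << 0;
                const REMOTE_SHUTDOWN_SENT: u8 = 1 << 1;
                let flags = LOCAL_SHUTDOWN_SENT | REMOTE_SHUTDOWN_SENT;
                let nothing_pending = true; // no inbound/outbound HTLCs, no pending fee update
                assert!(flags == LOCAL_SHUTDOWN_SENT | REMOTE_SHUTDOWN_SENT && nothing_pending);
        }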
1906
1907         /// Returns true if this channel is currently available for use. This is stricter than
1908         /// is_usable(): it additionally requires that the peer is currently connected.
1909         /// Allowed in any state (including after shutdown)
1910         pub fn is_live(&self) -> bool {
1911                 self.is_usable() && !self.channel_state.is_peer_disconnected()
1912         }
1913
1914         // Public utilities:
1915
1916         pub fn channel_id(&self) -> ChannelId {
1917                 self.channel_id
1918         }
1919
1920         /// Returns the `temporary_channel_id` used during channel establishment.
1921         ///
1922         /// Will return `None` for channels created prior to LDK version 0.0.115.
1923         pub fn temporary_channel_id(&self) -> Option<ChannelId> {
1924                 self.temporary_channel_id
1925         }
1926
1927         pub fn minimum_depth(&self) -> Option<u32> {
1928                 self.minimum_depth
1929         }
1930
1931         /// Gets the "user_id" value passed into the construction of this channel. It has no special
1932         /// meaning and exists only to allow users to have a persistent identifier of a channel.
1933         pub fn get_user_id(&self) -> u128 {
1934                 self.user_id
1935         }
1936
1937         /// Gets the channel's type
1938         pub fn get_channel_type(&self) -> &ChannelTypeFeatures {
1939                 &self.channel_type
1940         }
1941
1942         /// Gets the channel's `short_channel_id`.
1943         ///
1944         /// Will return `None` if the channel hasn't been confirmed yet.
1945         pub fn get_short_channel_id(&self) -> Option<u64> {
1946                 self.short_channel_id
1947         }
1948
1949         /// Allowed in any state (including after shutdown)
1950         pub fn latest_inbound_scid_alias(&self) -> Option<u64> {
1951                 self.latest_inbound_scid_alias
1952         }
1953
1954         /// Allowed in any state (including after shutdown)
1955         pub fn outbound_scid_alias(&self) -> u64 {
1956                 self.outbound_scid_alias
1957         }
1958
1959         /// Returns the holder signer for this channel.
1960         #[cfg(test)]
1961         pub fn get_signer(&self) -> &ChannelSignerType<SP> {
1962                 &self.holder_signer
1963         }
1964
1965         /// Only allowed immediately after deserialization if get_outbound_scid_alias returns 0
1966         /// (indicating we were written by LDK prior to 0.0.106, which did not set outbound SCID
1967         /// aliases), or immediately after `Channel` initialization, prior to any channel actions.
1968         pub fn set_outbound_scid_alias(&mut self, outbound_scid_alias: u64) {
1969                 debug_assert_eq!(self.outbound_scid_alias, 0);
1970                 self.outbound_scid_alias = outbound_scid_alias;
1971         }
1972
1973         /// Returns the funding_txo we either got from our peer, or were given by
1974         /// get_funding_created.
1975         pub fn get_funding_txo(&self) -> Option<OutPoint> {
1976                 self.channel_transaction_parameters.funding_outpoint
1977         }
1978
1979         /// Returns the height at which our funding transaction was confirmed.
1980         pub fn get_funding_tx_confirmation_height(&self) -> Option<u32> {
1981                 let conf_height = self.funding_tx_confirmation_height;
1982                 if conf_height > 0 {
1983                         Some(conf_height)
1984                 } else {
1985                         None
1986                 }
1987         }
1988
1989         /// Returns the block hash in which our funding transaction was confirmed.
1990         pub fn get_funding_tx_confirmed_in(&self) -> Option<BlockHash> {
1991                 self.funding_tx_confirmed_in
1992         }
1993
1994         /// Returns the current number of confirmations on the funding transaction.
1995         pub fn get_funding_tx_confirmations(&self, height: u32) -> u32 {
1996                 if self.funding_tx_confirmation_height == 0 {
1997                         // We either haven't seen any confirmation yet, or observed a reorg.
1998                         return 0;
1999                 }
2000
2001                 height.checked_sub(self.funding_tx_confirmation_height).map_or(0, |c| c + 1)
2002         }
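        // A worked example of the arithmetic above (hypothetical numbers): a funding
        // transaction confirmed at the current chain tip has exactly one confirmation,
        // hence the `+ 1`; an unset confirmation height (no confirmation seen, or a
        // reorg) yields zero.
        #[cfg(test)]
        #[allow(dead_code)]
        fn funding_confirmations_sketch() {
                let confirmation_height: u32 = 100;
                let tip_height: u32 = 105;
                let confs = tip_height.checked_sub(confirmation_height).map_or(0, |c| c + 1);
                assert_eq!(confs, 6);
        }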
2003
2004         fn get_holder_selected_contest_delay(&self) -> u16 {
2005                 self.channel_transaction_parameters.holder_selected_contest_delay
2006         }
2007
2008         fn get_holder_pubkeys(&self) -> &ChannelPublicKeys {
2009                 &self.channel_transaction_parameters.holder_pubkeys
2010         }
2011
2012         pub fn get_counterparty_selected_contest_delay(&self) -> Option<u16> {
2013                 self.channel_transaction_parameters.counterparty_parameters
2014                         .as_ref().map(|params| params.selected_contest_delay)
2015         }
2016
2017         fn get_counterparty_pubkeys(&self) -> &ChannelPublicKeys {
2018                 &self.channel_transaction_parameters.counterparty_parameters.as_ref().unwrap().pubkeys
2019         }
2020
2021         /// Allowed in any state (including after shutdown)
2022         pub fn get_counterparty_node_id(&self) -> PublicKey {
2023                 self.counterparty_node_id
2024         }
2025
2026         /// Allowed in any state (including after shutdown)
2027         pub fn get_holder_htlc_minimum_msat(&self) -> u64 {
2028                 self.holder_htlc_minimum_msat
2029         }
2030
2031         /// Allowed in any state (including after shutdown), but will return `None` before `TheirInitSent`
2032         pub fn get_holder_htlc_maximum_msat(&self) -> Option<u64> {
2033                 self.get_htlc_maximum_msat(self.holder_max_htlc_value_in_flight_msat)
2034         }
2035
2036         /// Allowed in any state (including after shutdown)
2037         pub fn get_announced_htlc_max_msat(&self) -> u64 {
2038                 cmp::min(
2039                         // Upper-bound by capacity. We make it a bit less than full capacity to prevent attempts
2040                         // to use the full capacity. This is an effort to reduce routing failures, since in many
2041                         // cases a channel might have been used to route very small values (either by honest users or as DoS).
2042                         self.channel_value_satoshis * 1000 * 9 / 10,
2043
2044                         self.counterparty_max_htlc_value_in_flight_msat
2045                 )
2046         }
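        // A worked example (hypothetical numbers): for a 1_000_000 sat channel whose
        // counterparty allows 800_000_000 msat in flight, we announce
        // min(1_000_000_000 * 9 / 10, 800_000_000) = 800_000_000 msat; the 90% capacity
        // cap only binds when the counterparty's in-flight limit is near full capacity.
        #[cfg(test)]
        #[allow(dead_code)]
        fn announced_htlc_max_sketch() {
                let channel_value_satoshis: u64 = 1_000_000;
                let counterparty_max_htlc_value_in_flight_msat: u64 = 800_000_000;
                let announced = cmp::min(
                        channel_value_satoshis * 1000 * 9 / 10,
                        counterparty_max_htlc_value_in_flight_msat);
                assert_eq!(announced, 800_000_000);
        }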
2047
2048         /// Allowed in any state (including after shutdown)
2049         pub fn get_counterparty_htlc_minimum_msat(&self) -> u64 {
2050                 self.counterparty_htlc_minimum_msat
2051         }
2052
2053         /// Allowed in any state (including after shutdown), but will return `None` before `TheirInitSent`
2054         pub fn get_counterparty_htlc_maximum_msat(&self) -> Option<u64> {
2055                 self.get_htlc_maximum_msat(self.counterparty_max_htlc_value_in_flight_msat)
2056         }
2057
2058         fn get_htlc_maximum_msat(&self, party_max_htlc_value_in_flight_msat: u64) -> Option<u64> {
2059                 self.counterparty_selected_channel_reserve_satoshis.map(|counterparty_reserve| {
2060                         let holder_reserve = self.holder_selected_channel_reserve_satoshis;
2061                         cmp::min(
2062                                 (self.channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
2063                                 party_max_htlc_value_in_flight_msat
2064                         )
2065                 })
2066         }
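        // A worked example (hypothetical numbers): with a 1_000_000 sat channel and a
        // 10_000 sat reserve on each side, the HTLC maximum is capped at
        // (1_000_000 - 10_000 - 10_000) * 1000 = 980_000_000 msat, i.e. the channel
        // value net of both reserves, unless the party's in-flight limit is lower.
        #[cfg(test)]
        #[allow(dead_code)]
        fn htlc_maximum_sketch() {
                let channel_value_satoshis: u64 = 1_000_000;
                let (counterparty_reserve, holder_reserve): (u64, u64) = (10_000, 10_000);
                let party_max_htlc_value_in_flight_msat: u64 = 990_000_000;
                let max = cmp::min(
                        (channel_value_satoshis - counterparty_reserve - holder_reserve) * 1000,
                        party_max_htlc_value_in_flight_msat);
                assert_eq!(max, 980_000_000);
        }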
2067
2068         pub fn get_value_satoshis(&self) -> u64 {
2069                 self.channel_value_satoshis
2070         }
2071
2072         pub fn get_fee_proportional_millionths(&self) -> u32 {
2073                 self.config.options.forwarding_fee_proportional_millionths
2074         }
2075
2076         pub fn get_cltv_expiry_delta(&self) -> u16 {
2077                 cmp::max(self.config.options.cltv_expiry_delta, MIN_CLTV_EXPIRY_DELTA)
2078         }
2079
2080         pub fn get_max_dust_htlc_exposure_msat<F: Deref>(&self,
2081                 fee_estimator: &LowerBoundedFeeEstimator<F>) -> u64
2082         where F::Target: FeeEstimator
2083         {
2084                 match self.config.options.max_dust_htlc_exposure {
2085                         MaxDustHTLCExposure::FeeRateMultiplier(multiplier) => {
2086                                 let feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(
2087                                         ConfirmationTarget::OnChainSweep) as u64;
2088                                 feerate_per_kw.saturating_mul(multiplier)
2089                         },
2090                         MaxDustHTLCExposure::FixedLimitMsat(limit) => limit,
2091                 }
2092         }
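        // A worked example (hypothetical numbers): under
        // MaxDustHTLCExposure::FeeRateMultiplier the allowed dust exposure scales with
        // the prevailing feerate. At 2_500 sat/kWU with an illustrative multiplier of
        // 10_000, up to 25_000_000 msat may be dust; saturating_mul guards against
        // overflow at absurd feerates.
        #[cfg(test)]
        #[allow(dead_code)]
        fn max_dust_exposure_sketch() {
                let feerate_per_kw: u64 = 2_500;
                let multiplier: u64 = 10_000;
                assert_eq!(feerate_per_kw.saturating_mul(multiplier), 25_000_000);
        }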
2093
2094         /// Returns the previous [`ChannelConfig`] applied to this channel, if any.
2095         pub fn prev_config(&self) -> Option<ChannelConfig> {
2096                 self.prev_config.map(|prev_config| prev_config.0)
2097         }
2098
2099         // Checks whether we should emit a `ChannelPending` event.
2100         pub(crate) fn should_emit_channel_pending_event(&mut self) -> bool {
2101                 self.is_funding_broadcast() && !self.channel_pending_event_emitted
2102         }
2103
2104         // Returns whether we already emitted a `ChannelPending` event.
2105         pub(crate) fn channel_pending_event_emitted(&self) -> bool {
2106                 self.channel_pending_event_emitted
2107         }
2108
2109         // Remembers that we already emitted a `ChannelPending` event.
2110         pub(crate) fn set_channel_pending_event_emitted(&mut self) {
2111                 self.channel_pending_event_emitted = true;
2112         }
2113
2114         // Checks whether we should emit a `ChannelReady` event.
2115         pub(crate) fn should_emit_channel_ready_event(&mut self) -> bool {
2116                 self.is_usable() && !self.channel_ready_event_emitted
2117         }
2118
2119         // Remembers that we already emitted a `ChannelReady` event.
2120         pub(crate) fn set_channel_ready_event_emitted(&mut self) {
2121                 self.channel_ready_event_emitted = true;
2122         }
2123
2124         /// Tracks the number of ticks elapsed since the previous [`ChannelConfig`] was updated. Once
2125         /// [`EXPIRE_PREV_CONFIG_TICKS`] is reached, the previous config is considered expired and will
2126         /// no longer be considered when forwarding HTLCs.
2127         pub fn maybe_expire_prev_config(&mut self) {
2128                 if self.prev_config.is_none() {
2129                         return;
2130                 }
2131                 let prev_config = self.prev_config.as_mut().unwrap();
2132                 prev_config.1 += 1;
2133                 if prev_config.1 == EXPIRE_PREV_CONFIG_TICKS {
2134                         self.prev_config = None;
2135                 }
2136         }
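        // A minimal sketch of the expiry logic above (hypothetical constant value): the
        // previous config is retained for EXPIRE_PREV_CONFIG_TICKS timer ticks so HTLCs
        // routed under the old policy are still honored while the update propagates.
        #[cfg(test)]
        #[allow(dead_code)]
        fn prev_config_expiry_sketch() {
                let expire_ticks: usize = 5; // stand-in for EXPIRE_PREV_CONFIG_TICKS
                let mut prev_config: Option<usize> = Some(0);
                for _ in 0..expire_ticks {
                        if let Some(ticks_elapsed) = prev_config.as_mut() { *ticks_elapsed += 1; }
                        if prev_config == Some(expire_ticks) { prev_config = None; }
                }
                assert!(prev_config.is_none());
        }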
2137
2138         /// Returns the current [`ChannelConfig`] applied to the channel.
2139         pub fn config(&self) -> ChannelConfig {
2140                 self.config.options
2141         }
2142
2143         /// Updates the channel's config. Returns a bool indicating whether the applied update
2144         /// changed our relay policy and thus warrants broadcasting a new ChannelUpdate message.
2145         pub fn update_config(&mut self, config: &ChannelConfig) -> bool {
2146                 let did_channel_update =
2147                         self.config.options.forwarding_fee_proportional_millionths != config.forwarding_fee_proportional_millionths ||
2148                         self.config.options.forwarding_fee_base_msat != config.forwarding_fee_base_msat ||
2149                         self.config.options.cltv_expiry_delta != config.cltv_expiry_delta;
2150                 if did_channel_update {
2151                         self.prev_config = Some((self.config.options, 0));
2152                         // Update the counter, which backs the ChannelUpdate timestamp, to allow the relay
2153                         // policy change to propagate throughout the network.
2154                         self.update_time_counter += 1;
2155                 }
2156                 self.config.options = *config;
2157                 did_channel_update
2158         }
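        // A usage sketch (hypothetical caller, not part of this file): callers typically
        // re-sign and broadcast a ChannelUpdate only when update_config returns true,
        // since only the relay-policy fields above feed into the gossiped update:
        //
        //     let mut new_options = context.config();
        //     new_options.forwarding_fee_base_msat += 100;
        //     if context.update_config(&new_options) {
        //             // build, sign, and broadcast a fresh ChannelUpdate here
        //     }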
2159
2160         /// Returns true if funding_signed was sent/received and the
2161         /// funding transaction has been broadcast if necessary.
2162         pub fn is_funding_broadcast(&self) -> bool {
2163                 !self.channel_state.is_pre_funded_state() &&
2164                         !matches!(self.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH))
2165         }
2166
2167         /// Transaction nomenclature is somewhat confusing here as there are many different cases - a
2168         /// transaction is referred to as "a's transaction" implying that a will be able to broadcast
2169         /// the transaction. Thus, b will generally be sending a signature over such a transaction to
2170         /// a, and a can revoke the transaction by providing b the relevant per_commitment_secret. As
2171         /// such, a transaction is generally the result of b increasing the amount paid to a (or adding
2172         /// an HTLC to a).
2173         /// @local is used only to convert relevant internal structures which refer to remote vs local
2174         /// to decide value of outputs and direction of HTLCs.
2175         /// @generated_by_local is used to determine *which* HTLCs to include - noting that the HTLC
2176         /// state may indicate that one peer has informed the other that they'd like to add an HTLC but
2177         /// have not yet committed it. Such HTLCs will only be included in transactions which are being
2178         /// generated by the peer which proposed adding the HTLCs, and thus we need to understand both
2179         /// which peer generated this transaction and "to whom" this transaction flows.
2180         #[inline]
2181         fn build_commitment_transaction<L: Deref>(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> CommitmentStats
2182                 where L::Target: Logger
2183         {
2184                 let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new();
2185                 let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len();
2186                 let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs);
2187
2188                 let broadcaster_dust_limit_satoshis = if local { self.holder_dust_limit_satoshis } else { self.counterparty_dust_limit_satoshis };
2189                 let mut remote_htlc_total_msat = 0;
2190                 let mut local_htlc_total_msat = 0;
2191                 let mut value_to_self_msat_offset = 0;
2192
2193                 let mut feerate_per_kw = self.feerate_per_kw;
2194                 if let Some((feerate, update_state)) = self.pending_update_fee {
2195                         if match update_state {
2196                                 // Note that these match the inclusion criteria when scanning
2197                                 // pending_inbound_htlcs below.
2198                                 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local },
2199                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local },
2200                                 FeeUpdateState::Outbound => { assert!(self.is_outbound());  generated_by_local },
2201                         } {
2202                                 feerate_per_kw = feerate;
2203                         }
2204                 }
2205
2206                 log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...",
2207                         commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number),
2208                         get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()),
2209                         &self.channel_id,
2210                         if local { "us" } else { "remote" }, if generated_by_local { "us" } else { "remote" }, feerate_per_kw);
2211
2212                 macro_rules! get_htlc_in_commitment {
2213                         ($htlc: expr, $offered: expr) => {
2214                                 HTLCOutputInCommitment {
2215                                         offered: $offered,
2216                                         amount_msat: $htlc.amount_msat,
2217                                         cltv_expiry: $htlc.cltv_expiry,
2218                                         payment_hash: $htlc.payment_hash,
2219                                         transaction_output_index: None
2220                                 }
2221                         }
2222                 }
2223
2224                 macro_rules! add_htlc_output {
2225                         ($htlc: expr, $outbound: expr, $source: expr, $state_name: expr) => {
2226                                 if $outbound == local { // "offered HTLC output"
2227                                         let htlc_in_tx = get_htlc_in_commitment!($htlc, true);
2228                                         let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2229                                                 0
2230                                         } else {
2231                                                 feerate_per_kw as u64 * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
2232                                         };
2233                                         if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2234                                                 log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2235                                                 included_non_dust_htlcs.push((htlc_in_tx, $source));
2236                                         } else {
2237                                                 log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {} due to dust limit", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2238                                                 included_dust_htlcs.push((htlc_in_tx, $source));
2239                                         }
2240                                 } else {
2241                                         let htlc_in_tx = get_htlc_in_commitment!($htlc, false);
2242                                         let htlc_tx_fee = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2243                                                 0
2244                                         } else {
2245                                                 feerate_per_kw as u64 * htlc_success_tx_weight(self.get_channel_type()) / 1000
2246                                         };
2247                                         if $htlc.amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee {
2248                                                 log_trace!(logger, "   ...including {} {} HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2249                                                 included_non_dust_htlcs.push((htlc_in_tx, $source));
2250                                         } else {
2251                                                 log_trace!(logger, "   ...including {} {} dust HTLC {} (hash {}) with value {}", if $outbound { "outbound" } else { "inbound" }, $state_name, $htlc.htlc_id, &$htlc.payment_hash, $htlc.amount_msat);
2252                                                 included_dust_htlcs.push((htlc_in_tx, $source));
2253                                         }
2254                                 }
2255                         }
2256                 }
2257
2258                 let mut inbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2259
2260                 for ref htlc in self.pending_inbound_htlcs.iter() {
2261                         let (include, state_name) = match htlc.state {
2262                                 InboundHTLCState::RemoteAnnounced(_) => (!generated_by_local, "RemoteAnnounced"),
2263                                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) => (!generated_by_local, "AwaitingRemoteRevokeToAnnounce"),
2264                                 InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => (true, "AwaitingAnnouncedRemoteRevoke"),
2265                                 InboundHTLCState::Committed => (true, "Committed"),
2266                                 InboundHTLCState::LocalRemoved(_) => (!generated_by_local, "LocalRemoved"),
2267                         };
2268
2269                         if include {
2270                                 add_htlc_output!(htlc, false, None, state_name);
2271                                 remote_htlc_total_msat += htlc.amount_msat;
2272                         } else {
2273                                 log_trace!(logger, "   ...not including inbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
2274                                 match &htlc.state {
2275                                         &InboundHTLCState::LocalRemoved(ref reason) => {
2276                                                 if generated_by_local {
2277                                                         if let &InboundHTLCRemovalReason::Fulfill(preimage) = reason {
2278                                                                 inbound_htlc_preimages.push(preimage);
2279                                                                 value_to_self_msat_offset += htlc.amount_msat as i64;
2280                                                         }
2281                                                 }
2282                                         },
2283                                         _ => {},
2284                                 }
2285                         }
2286                 }
2287
2288
2289                 let mut outbound_htlc_preimages: Vec<PaymentPreimage> = Vec::new();
2290
2291                 for ref htlc in self.pending_outbound_htlcs.iter() {
2292                         let (include, state_name) = match htlc.state {
2293                                 OutboundHTLCState::LocalAnnounced(_) => (generated_by_local, "LocalAnnounced"),
2294                                 OutboundHTLCState::Committed => (true, "Committed"),
2295                                 OutboundHTLCState::RemoteRemoved(_) => (generated_by_local, "RemoteRemoved"),
2296                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) => (generated_by_local, "AwaitingRemoteRevokeToRemove"),
2297                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) => (false, "AwaitingRemovedRemoteRevoke"),
2298                         };
2299
2300                         let preimage_opt = match htlc.state {
2301                                 OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(p)) => p,
2302                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(p)) => p,
2303                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(p)) => p,
2304                                 _ => None,
2305                         };
2306
2307                         if let Some(preimage) = preimage_opt {
2308                                 outbound_htlc_preimages.push(preimage);
2309                         }
2310
2311                         if include {
2312                                 add_htlc_output!(htlc, true, Some(&htlc.source), state_name);
2313                                 local_htlc_total_msat += htlc.amount_msat;
2314                         } else {
2315                                 log_trace!(logger, "   ...not including outbound HTLC {} (hash {}) with value {} due to state ({})", htlc.htlc_id, &htlc.payment_hash, htlc.amount_msat, state_name);
2316                                 match htlc.state {
2317                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_))|OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) => {
2318                                                 value_to_self_msat_offset -= htlc.amount_msat as i64;
2319                                         },
2320                                         OutboundHTLCState::RemoteRemoved(OutboundHTLCOutcome::Success(_)) => {
2321                                                 if !generated_by_local {
2322                                                         value_to_self_msat_offset -= htlc.amount_msat as i64;
2323                                                 }
2324                                         },
2325                                         _ => {},
2326                                 }
2327                         }
2328                 }
2329
2330                 let value_to_self_msat: i64 = (self.value_to_self_msat - local_htlc_total_msat) as i64 + value_to_self_msat_offset;
2331                 assert!(value_to_self_msat >= 0);
2332                 // Note that in case they have several just-awaiting-last-RAA fulfills in-progress (ie
2333                 // AwaitingRemoteRevokeToRemove or AwaitingRemovedRemoteRevoke) we may have allowed them to
2334                 // "violate" their reserve value by counting those against it. Thus, we have to convert
2335                 // everything to i64 before subtracting as otherwise we can overflow.
2336                 let value_to_remote_msat: i64 = (self.channel_value_satoshis * 1000) as i64 - (self.value_to_self_msat as i64) - (remote_htlc_total_msat as i64) - value_to_self_msat_offset;
2337                 assert!(value_to_remote_msat >= 0);
2338
2339                 #[cfg(debug_assertions)]
2340                 {
2341                         // Make sure that the to_self/to_remote is always either past the appropriate
2342                         // channel_reserve *or* it is making progress towards it.
2343                         let mut broadcaster_max_commitment_tx_output = if generated_by_local {
2344                                 self.holder_max_commitment_tx_output.lock().unwrap()
2345                         } else {
2346                                 self.counterparty_max_commitment_tx_output.lock().unwrap()
2347                         };
2348                         debug_assert!(broadcaster_max_commitment_tx_output.0 <= value_to_self_msat as u64 || value_to_self_msat / 1000 >= self.counterparty_selected_channel_reserve_satoshis.unwrap() as i64);
2349                         broadcaster_max_commitment_tx_output.0 = cmp::max(broadcaster_max_commitment_tx_output.0, value_to_self_msat as u64);
2350                         debug_assert!(broadcaster_max_commitment_tx_output.1 <= value_to_remote_msat as u64 || value_to_remote_msat / 1000 >= self.holder_selected_channel_reserve_satoshis as i64);
2351                         broadcaster_max_commitment_tx_output.1 = cmp::max(broadcaster_max_commitment_tx_output.1, value_to_remote_msat as u64);
2352                 }
2353
2354                 let total_fee_sat = commit_tx_fee_sat(feerate_per_kw, included_non_dust_htlcs.len(), &self.channel_transaction_parameters.channel_type_features);
2355                 let anchors_val = if self.channel_transaction_parameters.channel_type_features.supports_anchors_zero_fee_htlc_tx() { ANCHOR_OUTPUT_VALUE_SATOSHI * 2 } else { 0 } as i64;
2356                 let (value_to_self, value_to_remote) = if self.is_outbound() {
2357                         (value_to_self_msat / 1000 - anchors_val - total_fee_sat as i64, value_to_remote_msat / 1000)
2358                 } else {
2359                         (value_to_self_msat / 1000, value_to_remote_msat / 1000 - anchors_val - total_fee_sat as i64)
2360                 };
2361
2362                 let mut value_to_a = if local { value_to_self } else { value_to_remote };
2363                 let mut value_to_b = if local { value_to_remote } else { value_to_self };
2364                 let (funding_pubkey_a, funding_pubkey_b) = if local {
2365                         (self.get_holder_pubkeys().funding_pubkey, self.get_counterparty_pubkeys().funding_pubkey)
2366                 } else {
2367                         (self.get_counterparty_pubkeys().funding_pubkey, self.get_holder_pubkeys().funding_pubkey)
2368                 };
2369
2370                 if value_to_a >= (broadcaster_dust_limit_satoshis as i64) {
2371                         log_trace!(logger, "   ...including {} output with value {}", if local { "to_local" } else { "to_remote" }, value_to_a);
2372                 } else {
2373                         value_to_a = 0;
2374                 }
2375
2376                 if value_to_b >= (broadcaster_dust_limit_satoshis as i64) {
2377                         log_trace!(logger, "   ...including {} output with value {}", if local { "to_remote" } else { "to_local" }, value_to_b);
2378                 } else {
2379                         value_to_b = 0;
2380                 }
2381
2382                 let num_nondust_htlcs = included_non_dust_htlcs.len();
2383
2384                 let channel_parameters =
2385                         if local { self.channel_transaction_parameters.as_holder_broadcastable() }
2386                         else { self.channel_transaction_parameters.as_counterparty_broadcastable() };
2387                 let tx = CommitmentTransaction::new_with_auxiliary_htlc_data(commitment_number,
2388                                                                              value_to_a as u64,
2389                                                                              value_to_b as u64,
2390                                                                              funding_pubkey_a,
2391                                                                              funding_pubkey_b,
2392                                                                              keys.clone(),
2393                                                                              feerate_per_kw,
2394                                                                              &mut included_non_dust_htlcs,
2395                                                                              &channel_parameters
2396                 );
2397                 let mut htlcs_included = included_non_dust_htlcs;
2398                 // The unwrap is safe, because all non-dust HTLCs have been assigned an output index
2399                 htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap());
2400                 htlcs_included.append(&mut included_dust_htlcs);
2401
2402                 CommitmentStats {
2403                         tx,
2404                         feerate_per_kw,
2405                         total_fee_sat,
2406                         num_nondust_htlcs,
2407                         htlcs_included,
2408                         local_balance_msat: value_to_self_msat as u64,
2409                         remote_balance_msat: value_to_remote_msat as u64,
2410                         inbound_htlc_preimages,
2411                         outbound_htlc_preimages,
2412                 }
2413         }
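        // A worked example of the dust classification used above (hypothetical numbers):
        // a non-anchor offered HTLC is non-dust only if its value covers the
        // broadcaster's dust limit plus the fee of the HTLC-timeout claim at the current
        // feerate (the fee term is zero on anchors-zero-fee-HTLC channels).
        #[cfg(test)]
        #[allow(dead_code)]
        fn commitment_dust_classification_sketch() {
                let feerate_per_kw: u64 = 1_000;
                let htlc_timeout_tx_weight: u64 = 663; // non-anchor HTLC-timeout weight, for illustration
                let broadcaster_dust_limit_satoshis: u64 = 546;
                let htlc_tx_fee = feerate_per_kw * htlc_timeout_tx_weight / 1000;
                let amount_msat: u64 = 1_500_000; // 1_500 sat: above 546 + 663 sat, so non-dust
                assert!(amount_msat / 1000 >= broadcaster_dust_limit_satoshis + htlc_tx_fee);
        }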
2414
2415         #[inline]
2416         /// Creates a set of keys for build_commitment_transaction to generate a transaction which our
2417         /// counterparty will sign (ie DO NOT send signatures over a transaction created by this to
2418         /// our counterparty!)
2419         /// The result is a transaction which we can revoke broadcastership of (ie a "local" transaction)
2420         /// TODO Some magic rust shit to compile-time check this?
2421         fn build_holder_transaction_keys(&self, commitment_number: u64) -> TxCreationKeys {
2422                 let per_commitment_point = self.holder_signer.as_ref().get_per_commitment_point(commitment_number, &self.secp_ctx);
2423                 let delayed_payment_base = &self.get_holder_pubkeys().delayed_payment_basepoint;
2424                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2425                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2426
2427                 TxCreationKeys::derive_new(&self.secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint)
2428         }
2429
2430         #[inline]
2431         /// Creates a set of keys for build_commitment_transaction to generate a transaction which we
2432         /// will sign and send to our counterparty.
2434         fn build_remote_transaction_keys(&self) -> TxCreationKeys {
2435                 let revocation_basepoint = &self.get_holder_pubkeys().revocation_basepoint;
2436                 let htlc_basepoint = &self.get_holder_pubkeys().htlc_basepoint;
2437                 let counterparty_pubkeys = self.get_counterparty_pubkeys();
2438
2439                 TxCreationKeys::derive_new(&self.secp_ctx, &self.counterparty_cur_commitment_point.unwrap(), &counterparty_pubkeys.delayed_payment_basepoint, &counterparty_pubkeys.htlc_basepoint, revocation_basepoint, htlc_basepoint)
2440         }
2441
2442         /// Gets the redeemscript for the funding transaction output (ie the funding transaction output
2443         /// pays to get_funding_redeemscript().to_v0_p2wsh()).
2444         /// Panics if called before accept_channel/InboundV1Channel::new
2445         pub fn get_funding_redeemscript(&self) -> ScriptBuf {
2446                 make_funding_redeemscript(&self.get_holder_pubkeys().funding_pubkey, self.counterparty_funding_pubkey())
2447         }
2448
2449         fn counterparty_funding_pubkey(&self) -> &PublicKey {
2450                 &self.get_counterparty_pubkeys().funding_pubkey
2451         }
2452
2453         pub fn get_feerate_sat_per_1000_weight(&self) -> u32 {
2454                 self.feerate_per_kw
2455         }
2456
2457         pub fn get_dust_buffer_feerate(&self, outbound_feerate_update: Option<u32>) -> u32 {
2458                 // When calculating our exposure to dust HTLCs, we assume that the channel feerate
2459                 // may, at any point, increase by at least 10 sat/vB (i.e. 2530 sat/kWU) or 25%,
2460                 // whichever is higher. This ensures that we aren't suddenly exposed to significantly
2461                 // more dust balance if the feerate increases when we have several HTLCs pending
2462                 // which are near the dust limit.
2463                 let mut feerate_per_kw = self.feerate_per_kw;
2464                 // If there's a pending update fee, use it to ensure we aren't under-estimating
2465                 // potential feerate updates coming soon.
2466                 if let Some((feerate, _)) = self.pending_update_fee {
2467                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2468                 }
2469                 if let Some(feerate) = outbound_feerate_update {
2470                         feerate_per_kw = cmp::max(feerate_per_kw, feerate);
2471                 }
2472                 let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
2473                 cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
2474         }
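        // A worked example (hypothetical numbers): the buffered feerate is
        // max(2530, feerate * 1.25). At 1_000 sat/kWU the 2530 floor dominates; at
        // 10_000 sat/kWU the 25% bump yields 12_500 sat/kWU.
        #[cfg(test)]
        #[allow(dead_code)]
        fn dust_buffer_feerate_sketch() {
                let buffered = |feerate_per_kw: u32| -> u32 {
                        let feerate_plus_quarter = feerate_per_kw.checked_mul(1250).map(|v| v / 1000);
                        cmp::max(2530, feerate_plus_quarter.unwrap_or(u32::max_value()))
                };
                assert_eq!(buffered(1_000), 2_530);
                assert_eq!(buffered(10_000), 12_500);
        }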
2475
2476         /// Get forwarding information for the counterparty.
2477         pub fn counterparty_forwarding_info(&self) -> Option<CounterpartyForwardingInfo> {
2478                 self.counterparty_forwarding_info.clone()
2479         }
2480
2481         /// Returns an HTLCStats about inbound pending htlcs
2482         fn get_inbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2483                 let context = self;
2484                 let mut stats = HTLCStats {
2485                         pending_htlcs: context.pending_inbound_htlcs.len() as u32,
2486                         pending_htlcs_value_msat: 0,
2487                         on_counterparty_tx_dust_exposure_msat: 0,
2488                         on_holder_tx_dust_exposure_msat: 0,
2489                         holding_cell_msat: 0,
2490                         on_holder_tx_holding_cell_htlcs_count: 0,
2491                 };
2492
2493                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2494                         (0, 0)
2495                 } else {
2496                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2497                         (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2498                                 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2499                 };
2500                 let counterparty_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2501                 let holder_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2502                 for ref htlc in context.pending_inbound_htlcs.iter() {
2503                         stats.pending_htlcs_value_msat += htlc.amount_msat;
2504                         if htlc.amount_msat / 1000 < counterparty_dust_limit_timeout_sat {
2505                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2506                         }
2507                         if htlc.amount_msat / 1000 < holder_dust_limit_success_sat {
2508                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2509                         }
2510                 }
2511                 stats
2512         }
2513
2514         /// Returns an HTLCStats about pending outbound htlcs, *including* pending adds in our holding cell.
2515         fn get_outbound_pending_htlc_stats(&self, outbound_feerate_update: Option<u32>) -> HTLCStats {
2516                 let context = self;
2517                 let mut stats = HTLCStats {
2518                         pending_htlcs: context.pending_outbound_htlcs.len() as u32,
2519                         pending_htlcs_value_msat: 0,
2520                         on_counterparty_tx_dust_exposure_msat: 0,
2521                         on_holder_tx_dust_exposure_msat: 0,
2522                         holding_cell_msat: 0,
2523                         on_holder_tx_holding_cell_htlcs_count: 0,
2524                 };
2525
2526                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2527                         (0, 0)
2528                 } else {
2529                         let dust_buffer_feerate = context.get_dust_buffer_feerate(outbound_feerate_update) as u64;
2530                         (dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000,
2531                                 dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000)
2532                 };
2533                 let counterparty_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2534                 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2535                 for ref htlc in context.pending_outbound_htlcs.iter() {
2536                         stats.pending_htlcs_value_msat += htlc.amount_msat;
2537                         if htlc.amount_msat / 1000 < counterparty_dust_limit_success_sat {
2538                                 stats.on_counterparty_tx_dust_exposure_msat += htlc.amount_msat;
2539                         }
2540                         if htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat {
2541                                 stats.on_holder_tx_dust_exposure_msat += htlc.amount_msat;
2542                         }
2543                 }
2544
2545                 for update in context.holding_cell_htlc_updates.iter() {
2546                         if let &HTLCUpdateAwaitingACK::AddHTLC { ref amount_msat, .. } = update {
2547                                 stats.pending_htlcs += 1;
2548                                 stats.pending_htlcs_value_msat += amount_msat;
2549                                 stats.holding_cell_msat += amount_msat;
2550                                 if *amount_msat / 1000 < counterparty_dust_limit_success_sat {
2551                                         stats.on_counterparty_tx_dust_exposure_msat += amount_msat;
2552                                 }
2553                                 if *amount_msat / 1000 < holder_dust_limit_timeout_sat {
2554                                         stats.on_holder_tx_dust_exposure_msat += amount_msat;
2555                                 } else {
2556                                         stats.on_holder_tx_holding_cell_htlcs_count += 1;
2557                                 }
2558                         }
2559                 }
2560                 stats
2561         }
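        // A worked example (hypothetical numbers): our outbound HTLCs are claimed via
        // HTLC-success on the counterparty's commitment but via HTLC-timeout on our
        // own, so each side's dust threshold adds the fee of the corresponding claim
        // transaction to that side's dust limit.
        #[cfg(test)]
        #[allow(dead_code)]
        fn outbound_dust_thresholds_sketch() {
                let dust_buffer_feerate: u64 = 2_530;
                let (success_weight, timeout_weight): (u64, u64) = (703, 663); // non-anchor weights, illustrative
                let counterparty_dust_limit_satoshis: u64 = 354;
                let holder_dust_limit_satoshis: u64 = 354;
                let counterparty_dust_limit_success_sat =
                        dust_buffer_feerate * success_weight / 1000 + counterparty_dust_limit_satoshis;
                let holder_dust_limit_timeout_sat =
                        dust_buffer_feerate * timeout_weight / 1000 + holder_dust_limit_satoshis;
                assert_eq!(counterparty_dust_limit_success_sat, 2_132);
                assert_eq!(holder_dust_limit_timeout_sat, 2_031);
        }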
2562
2563         /// Returns information on all pending inbound HTLCs.
2564         pub fn get_pending_inbound_htlc_details(&self) -> Vec<InboundHTLCDetails> {
2565                 let mut holding_cell_states = new_hash_map();
2566                 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2567                         match holding_cell_update {
2568                                 HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
2569                                         holding_cell_states.insert(
2570                                                 htlc_id,
2571                                                 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFulfill,
2572                                         );
2573                                 },
2574                                 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } => {
2575                                         holding_cell_states.insert(
2576                                                 htlc_id,
2577                                                 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2578                                         );
2579                                 },
2580                                 HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } => {
2581                                         holding_cell_states.insert(
2582                                                 htlc_id,
2583                                                 InboundHTLCStateDetails::AwaitingRemoteRevokeToRemoveFail,
2584                                         );
2585                                 },
2586                                 // Outbound HTLC.
2587                                 HTLCUpdateAwaitingACK::AddHTLC { .. } => {},
2588                         }
2589                 }
2590                 let mut inbound_details = Vec::new();
2591                 let htlc_success_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2592                         0
2593                 } else {
2594                         let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2595                         dust_buffer_feerate * htlc_success_tx_weight(self.get_channel_type()) / 1000
2596                 };
2597                 let holder_dust_limit_success_sat = htlc_success_dust_limit + self.holder_dust_limit_satoshis;
2598                 for htlc in self.pending_inbound_htlcs.iter() {
2599                         if let Some(state_details) = (&htlc.state).into() {
2600                                 inbound_details.push(InboundHTLCDetails{
2601                                         htlc_id: htlc.htlc_id,
2602                                         amount_msat: htlc.amount_msat,
2603                                         cltv_expiry: htlc.cltv_expiry,
2604                                         payment_hash: htlc.payment_hash,
2605                                         state: Some(holding_cell_states.remove(&htlc.htlc_id).unwrap_or(state_details)),
2606                                         is_dust: htlc.amount_msat / 1000 < holder_dust_limit_success_sat,
2607                                 });
2608                         }
2609                 }
2610                 inbound_details
2611         }
2612
2613         /// Returns information on all pending outbound HTLCs.
2614         pub fn get_pending_outbound_htlc_details(&self) -> Vec<OutboundHTLCDetails> {
2615                 let mut outbound_details = Vec::new();
2616                 let htlc_timeout_dust_limit = if self.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2617                         0
2618                 } else {
2619                         let dust_buffer_feerate = self.get_dust_buffer_feerate(None) as u64;
2620                         dust_buffer_feerate * htlc_timeout_tx_weight(self.get_channel_type()) / 1000
2621                 };
2622                 let holder_dust_limit_timeout_sat = htlc_timeout_dust_limit + self.holder_dust_limit_satoshis;
2623                 for htlc in self.pending_outbound_htlcs.iter() {
2624                         outbound_details.push(OutboundHTLCDetails{
2625                                 htlc_id: Some(htlc.htlc_id),
2626                                 amount_msat: htlc.amount_msat,
2627                                 cltv_expiry: htlc.cltv_expiry,
2628                                 payment_hash: htlc.payment_hash,
2629                                 skimmed_fee_msat: htlc.skimmed_fee_msat,
2630                                 state: Some((&htlc.state).into()),
2631                                 is_dust: htlc.amount_msat / 1000 < holder_dust_limit_timeout_sat,
2632                         });
2633                 }
2634                 for holding_cell_update in self.holding_cell_htlc_updates.iter() {
2635                         if let HTLCUpdateAwaitingACK::AddHTLC {
2636                                 amount_msat,
2637                                 cltv_expiry,
2638                                 payment_hash,
2639                                 skimmed_fee_msat,
2640                                 ..
2641                         } = *holding_cell_update {
2642                                 outbound_details.push(OutboundHTLCDetails{
2643                                         htlc_id: None,
2644                                         amount_msat,
2645                                         cltv_expiry,
2646                                         payment_hash,
2647                                         skimmed_fee_msat,
2648                                         state: Some(OutboundHTLCStateDetails::AwaitingRemoteRevokeToAdd),
2649                                         is_dust: amount_msat / 1000 < holder_dust_limit_timeout_sat,
2650                                 });
2651                         }
2652                 }
2653                 outbound_details
2654         }
2655
2656         /// Get the available balances, see [`AvailableBalances`]'s fields for more info.
2657         /// Doesn't bother handling the
2658         /// if-we-removed-it-already-but-haven't-fully-resolved-they-can-still-send-an-inbound-HTLC
2659         /// corner case properly.
2660         pub fn get_available_balances<F: Deref>(&self, fee_estimator: &LowerBoundedFeeEstimator<F>)
2661         -> AvailableBalances
2662         where F::Target: FeeEstimator
2663         {
2664                 let context = &self;
2665                 // Note that we have to handle overflow due to the above case.
2666                 let inbound_stats = context.get_inbound_pending_htlc_stats(None);
2667                 let outbound_stats = context.get_outbound_pending_htlc_stats(None);
2668
2669                 let mut balance_msat = context.value_to_self_msat;
2670                 for ref htlc in context.pending_inbound_htlcs.iter() {
2671                         if let InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(_)) = htlc.state {
2672                                 balance_msat += htlc.amount_msat;
2673                         }
2674                 }
2675                 balance_msat -= outbound_stats.pending_htlcs_value_msat;
2676
2677                 let outbound_capacity_msat = context.value_to_self_msat
2678                                 .saturating_sub(outbound_stats.pending_htlcs_value_msat)
2679                                 .saturating_sub(
2680                                         context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0) * 1000);
2681
2682                 let mut available_capacity_msat = outbound_capacity_msat;
2683
2684                 let anchor_outputs_value_msat = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2685                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
2686                 } else {
2687                         0
2688                 };
2689                 if context.is_outbound() {
2690                         // We must account for the channel commit tx fee when computing how much of the available
2691                         // capacity can be used in the next htlc. Mirrors the logic in send_htlc.
2692                         //
2693                         // The fee depends on whether the amount we will be sending is above dust or not,
2694                         // and the answer will in turn change the amount itself, making it a circular
2695                         // dependency.
2696                         // This complicates the computation around dust-values, up to the one-htlc-value.
2697                         let mut real_dust_limit_timeout_sat = context.holder_dust_limit_satoshis;
2698                         if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2699                                 real_dust_limit_timeout_sat += context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000;
2700                         }
2701
2702                         let htlc_above_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000, HTLCInitiator::LocalOffered);
2703                         let mut max_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_above_dust, Some(()));
2704                         let htlc_dust = HTLCCandidate::new(real_dust_limit_timeout_sat * 1000 - 1, HTLCInitiator::LocalOffered);
2705                         let mut min_reserved_commit_tx_fee_msat = context.next_local_commit_tx_fee_msat(htlc_dust, Some(()));
2706                         if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2707                                 max_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2708                                 min_reserved_commit_tx_fee_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
2709                         }
2710
2711                         // We will first subtract the fee as if we were above-dust. Then, if the resulting
2712                         // value ends up being below dust, we have this fee available again. In that case,
2713                         // match the value to right-below-dust.
2714                         let mut capacity_minus_commitment_fee_msat: i64 = available_capacity_msat as i64 -
2715                                 max_reserved_commit_tx_fee_msat as i64 - anchor_outputs_value_msat as i64;
2716                         if capacity_minus_commitment_fee_msat < (real_dust_limit_timeout_sat as i64) * 1000 {
2717                                 let one_htlc_difference_msat = max_reserved_commit_tx_fee_msat - min_reserved_commit_tx_fee_msat;
2718                                 debug_assert!(one_htlc_difference_msat != 0);
2719                                 capacity_minus_commitment_fee_msat += one_htlc_difference_msat as i64;
2720                                 capacity_minus_commitment_fee_msat = cmp::min(real_dust_limit_timeout_sat as i64 * 1000 - 1, capacity_minus_commitment_fee_msat);
2721                                 available_capacity_msat = cmp::max(0, cmp::min(capacity_minus_commitment_fee_msat, available_capacity_msat as i64)) as u64;
2722                         } else {
2723                                 available_capacity_msat = capacity_minus_commitment_fee_msat as u64;
2724                         }
2725                 } else {
2726                         // If the channel is inbound (i.e. counterparty pays the fee), we need to make sure
2727                         // sending a new HTLC won't reduce their balance below our reserve threshold.
2728                         let mut real_dust_limit_success_sat = context.counterparty_dust_limit_satoshis;
2729                         if !context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2730                                 real_dust_limit_success_sat += context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000;
2731                         }
2732
2733                         let htlc_above_dust = HTLCCandidate::new(real_dust_limit_success_sat * 1000, HTLCInitiator::LocalOffered);
2734                         let max_reserved_commit_tx_fee_msat = context.next_remote_commit_tx_fee_msat(htlc_above_dust, None);
2735
2736                         let holder_selected_chan_reserve_msat = context.holder_selected_channel_reserve_satoshis * 1000;
2737                         let remote_balance_msat = (context.channel_value_satoshis * 1000 - context.value_to_self_msat)
2738                                 .saturating_sub(inbound_stats.pending_htlcs_value_msat);
2739
2740                         if remote_balance_msat < max_reserved_commit_tx_fee_msat + holder_selected_chan_reserve_msat + anchor_outputs_value_msat {
2741                                 // If another HTLC's fee would reduce the remote's balance below the reserve limit
2742                                 // we've selected for them, we can only send dust HTLCs.
2743                                 available_capacity_msat = cmp::min(available_capacity_msat, real_dust_limit_success_sat * 1000 - 1);
2744                         }
2745                 }
2746
2747                 let mut next_outbound_htlc_minimum_msat = context.counterparty_htlc_minimum_msat;
2748
2749                 // If we get close to our maximum dust exposure, we end up in a situation where we can send
2750                 // between zero and the remaining dust exposure limit, OR above the dust limit.
2751                 // Because we cannot express this as a simple min/max, we prefer to tell the user they can
2752                 // send above the dust limit (as the router can always overpay to meet the dust limit).
2753                 let mut remaining_msat_below_dust_exposure_limit = None;
2754                 let mut dust_exposure_dust_limit_msat = 0;
2755                 let max_dust_htlc_exposure_msat = context.get_max_dust_htlc_exposure_msat(fee_estimator);
2756
2757                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2758                         (context.counterparty_dust_limit_satoshis, context.holder_dust_limit_satoshis)
2759                 } else {
2760                         let dust_buffer_feerate = context.get_dust_buffer_feerate(None) as u64;
2761                         (context.counterparty_dust_limit_satoshis + dust_buffer_feerate * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2762                          context.holder_dust_limit_satoshis       + dust_buffer_feerate * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2763                 };
2764                 let on_counterparty_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
2765                 if on_counterparty_dust_htlc_exposure_msat as i64 + htlc_success_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2766                         remaining_msat_below_dust_exposure_limit =
2767                                 Some(max_dust_htlc_exposure_msat.saturating_sub(on_counterparty_dust_htlc_exposure_msat));
2768                         dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_success_dust_limit * 1000);
2769                 }
2770
2771                 let on_holder_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
2772                 if on_holder_dust_htlc_exposure_msat as i64 + htlc_timeout_dust_limit as i64 * 1000 - 1 > max_dust_htlc_exposure_msat.try_into().unwrap_or(i64::max_value()) {
2773                         remaining_msat_below_dust_exposure_limit = Some(cmp::min(
2774                                 remaining_msat_below_dust_exposure_limit.unwrap_or(u64::max_value()),
2775                                 max_dust_htlc_exposure_msat.saturating_sub(on_holder_dust_htlc_exposure_msat)));
2776                         dust_exposure_dust_limit_msat = cmp::max(dust_exposure_dust_limit_msat, htlc_timeout_dust_limit * 1000);
2777                 }
2778
2779                 if let Some(remaining_limit_msat) = remaining_msat_below_dust_exposure_limit {
2780                         if available_capacity_msat < dust_exposure_dust_limit_msat {
2781                                 available_capacity_msat = cmp::min(available_capacity_msat, remaining_limit_msat);
2782                         } else {
2783                                 next_outbound_htlc_minimum_msat = cmp::max(next_outbound_htlc_minimum_msat, dust_exposure_dust_limit_msat);
2784                         }
2785                 }
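                // Illustrative sketch with hypothetical numbers: with a 5_000_000 msat max dust
                // exposure, 4_800_000 msat already exposed, and a 1_000 sat effective dust limit,
                // we can send either up to 200_000 msat more as dust or any amount of at least
                // 1_000_000 msat (non-dust); the branch above decides which of those two bounds
                // we surface to the user.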
2786
2787                 available_capacity_msat = cmp::min(available_capacity_msat,
2788                         context.counterparty_max_htlc_value_in_flight_msat - outbound_stats.pending_htlcs_value_msat);
2789
2790                 if outbound_stats.pending_htlcs + 1 > context.counterparty_max_accepted_htlcs as u32 {
2791                         available_capacity_msat = 0;
2792                 }
2793
2794                 AvailableBalances {
2795                         inbound_capacity_msat: cmp::max(context.channel_value_satoshis as i64 * 1000
2796                                         - context.value_to_self_msat as i64
2797                                         - context.get_inbound_pending_htlc_stats(None).pending_htlcs_value_msat as i64
2798                                         - context.holder_selected_channel_reserve_satoshis as i64 * 1000,
2799                                 0) as u64,
2800                         outbound_capacity_msat,
2801                         next_outbound_htlc_limit_msat: available_capacity_msat,
2802                         next_outbound_htlc_minimum_msat,
2803                         balance_msat,
2804                 }
2805         }
2806
2807         pub fn get_holder_counterparty_selected_channel_reserve_satoshis(&self) -> (u64, Option<u64>) {
2808                 let context = &self;
2809                 (context.holder_selected_channel_reserve_satoshis, context.counterparty_selected_channel_reserve_satoshis)
2810         }
2811
2812         /// Get the commitment tx fee for the local's (i.e. our) next commitment transaction based on the
2813         /// number of pending HTLCs that are on track to be in our next commitment tx.
2814         ///
2815         /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2816         /// `fee_spike_buffer_htlc` is `Some`.
2817         ///
2818         /// The first extra HTLC is useful for determining whether we can accept a further HTLC; the
2819         /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2820         ///
2821         /// Dust HTLCs are excluded.
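        ///
        /// As a hypothetical sketch: with two committed non-dust HTLCs, nothing in the holding
        /// cell, a non-dust `htlc` candidate, and `fee_spike_buffer_htlc` set, this evaluates
        /// `commit_tx_fee_msat(feerate_per_kw, 2 + 1 + 1, ..)`, i.e. the fee for four HTLC
        /// outputs at the current feerate.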
2822         fn next_local_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2823                 let context = &self;
2824                 assert!(context.is_outbound());
2825
2826                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2827                         (0, 0)
2828                 } else {
2829                         (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2830                                 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2831                 };
2832                 let real_dust_limit_success_sat = htlc_success_dust_limit + context.holder_dust_limit_satoshis;
2833                 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.holder_dust_limit_satoshis;
2834
2835                 let mut addl_htlcs = 0;
2836                 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2837                 match htlc.origin {
2838                         HTLCInitiator::LocalOffered => {
2839                                 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2840                                         addl_htlcs += 1;
2841                                 }
2842                         },
2843                         HTLCInitiator::RemoteOffered => {
2844                                 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2845                                         addl_htlcs += 1;
2846                                 }
2847                         }
2848                 }
2849
2850                 let mut included_htlcs = 0;
2851                 for ref htlc in context.pending_inbound_htlcs.iter() {
2852                         if htlc.amount_msat / 1000 < real_dust_limit_success_sat {
2853                                 continue
2854                         }
2855                         // We include LocalRemoved HTLCs here because we may still need to broadcast a commitment
2856                         // transaction including this HTLC if it times out before they RAA.
2857                         included_htlcs += 1;
2858                 }
2859
2860                 for ref htlc in context.pending_outbound_htlcs.iter() {
2861                         if htlc.amount_msat / 1000 < real_dust_limit_timeout_sat {
2862                                 continue
2863                         }
2864                         match htlc.state {
2865                                 OutboundHTLCState::LocalAnnounced {..} => included_htlcs += 1,
2866                                 OutboundHTLCState::Committed => included_htlcs += 1,
2867                                 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2868                                 // We don't include AwaitingRemoteRevokeToRemove HTLCs because our next commitment
2869                                 // transaction won't be generated until they send us their next RAA, which will mean
2870                                 // dropping any HTLCs in this state.
2871                                 _ => {},
2872                         }
2873                 }
2874
2875                 for htlc in context.holding_cell_htlc_updates.iter() {
2876                         match htlc {
2877                                 &HTLCUpdateAwaitingACK::AddHTLC { amount_msat, .. } => {
2878                                         if amount_msat / 1000 < real_dust_limit_timeout_sat {
2879                                                 continue
2880                                         }
2881                                         included_htlcs += 1
2882                                 },
2883                                 _ => {}, // Don't include claims/fails that are awaiting ack, because once we get the
2884                                          // ack we're guaranteed to never include them in commitment txs anymore.
2885                         }
2886                 }
2887
2888                 let num_htlcs = included_htlcs + addl_htlcs;
2889                 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2890                 #[cfg(any(test, fuzzing))]
2891                 {
2892                         let mut fee = res;
2893                         if fee_spike_buffer_htlc.is_some() {
2894                                 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2895                         }
2896                         let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len()
2897                                 + context.holding_cell_htlc_updates.len();
2898                         let commitment_tx_info = CommitmentTxInfoCached {
2899                                 fee,
2900                                 total_pending_htlcs,
2901                                 next_holder_htlc_id: match htlc.origin {
2902                                         HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2903                                         HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2904                                 },
2905                                 next_counterparty_htlc_id: match htlc.origin {
2906                                         HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2907                                         HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2908                                 },
2909                                 feerate: context.feerate_per_kw,
2910                         };
2911                         *context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
2912                 }
2913                 res
2914         }
2915
2916         /// Get the commitment tx fee for the remote's next commitment transaction based on the number of
2917         /// pending HTLCs that are on track to be in their next commitment tx.
2918         ///
2919         /// Optionally includes the `HTLCCandidate` given by `htlc` and an additional non-dust HTLC if
2920         /// `fee_spike_buffer_htlc` is `Some`.
2921         ///
2922         /// The first extra HTLC is useful for determining whether we can accept a further HTLC; the
2923         /// second allows for creating a buffer to ensure a further HTLC can always be accepted/added.
2924         ///
2925         /// Dust HTLCs are excluded.
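        ///
        /// As a hypothetical sketch: on a non-anchor channel with a counterparty dust limit of
        /// 354 sat, a feerate of 1_000 sat/kW, and an assumed HTLC success transaction weight of
        /// 703, an HTLC we offer only counts toward the fee here if it is at least
        /// 354 + 703 * 1_000 / 1_000 = 1_057 sat.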
2926         fn next_remote_commit_tx_fee_msat(&self, htlc: HTLCCandidate, fee_spike_buffer_htlc: Option<()>) -> u64 {
2927                 let context = &self;
2928                 assert!(!context.is_outbound());
2929
2930                 let (htlc_success_dust_limit, htlc_timeout_dust_limit) = if context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
2931                         (0, 0)
2932                 } else {
2933                         (context.feerate_per_kw as u64 * htlc_success_tx_weight(context.get_channel_type()) / 1000,
2934                                 context.feerate_per_kw as u64 * htlc_timeout_tx_weight(context.get_channel_type()) / 1000)
2935                 };
2936                 let real_dust_limit_success_sat = htlc_success_dust_limit + context.counterparty_dust_limit_satoshis;
2937                 let real_dust_limit_timeout_sat = htlc_timeout_dust_limit + context.counterparty_dust_limit_satoshis;
2938
2939                 let mut addl_htlcs = 0;
2940                 if fee_spike_buffer_htlc.is_some() { addl_htlcs += 1; }
2941                 match htlc.origin {
2942                         HTLCInitiator::LocalOffered => {
2943                                 if htlc.amount_msat / 1000 >= real_dust_limit_success_sat {
2944                                         addl_htlcs += 1;
2945                                 }
2946                         },
2947                         HTLCInitiator::RemoteOffered => {
2948                                 if htlc.amount_msat / 1000 >= real_dust_limit_timeout_sat {
2949                                         addl_htlcs += 1;
2950                                 }
2951                         }
2952                 }
2953
2954                 // When calculating the set of HTLCs which will be included in their next commitment_signed, all
2955                 // non-dust inbound HTLCs are included (as all states imply they will be included) and only
2956                 // committed outbound HTLCs, see below.
2957                 let mut included_htlcs = 0;
2958                 for ref htlc in context.pending_inbound_htlcs.iter() {
2959                         if htlc.amount_msat / 1000 <= real_dust_limit_timeout_sat {
2960                                 continue
2961                         }
2962                         included_htlcs += 1;
2963                 }
2964
2965                 for ref htlc in context.pending_outbound_htlcs.iter() {
2966                         if htlc.amount_msat / 1000 <= real_dust_limit_success_sat {
2967                                 continue
2968                         }
2969                         // We only include outbound HTLCs if they will be included in their next commitment_signed,
2970                         // i.e. if they've responded to us with an RAA after announcement.
2971                         match htlc.state {
2972                                 OutboundHTLCState::Committed => included_htlcs += 1,
2973                                 OutboundHTLCState::RemoteRemoved {..} => included_htlcs += 1,
2974                                 OutboundHTLCState::LocalAnnounced { .. } => included_htlcs += 1,
2975                                 _ => {},
2976                         }
2977                 }
2978
2979                 let num_htlcs = included_htlcs + addl_htlcs;
2980                 let res = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs, &context.channel_type);
2981                 #[cfg(any(test, fuzzing))]
2982                 {
2983                         let mut fee = res;
2984                         if fee_spike_buffer_htlc.is_some() {
2985                                 fee = commit_tx_fee_msat(context.feerate_per_kw, num_htlcs - 1, &context.channel_type);
2986                         }
2987                         let total_pending_htlcs = context.pending_inbound_htlcs.len() + context.pending_outbound_htlcs.len();
2988                         let commitment_tx_info = CommitmentTxInfoCached {
2989                                 fee,
2990                                 total_pending_htlcs,
2991                                 next_holder_htlc_id: match htlc.origin {
2992                                         HTLCInitiator::LocalOffered => context.next_holder_htlc_id + 1,
2993                                         HTLCInitiator::RemoteOffered => context.next_holder_htlc_id,
2994                                 },
2995                                 next_counterparty_htlc_id: match htlc.origin {
2996                                         HTLCInitiator::LocalOffered => context.next_counterparty_htlc_id,
2997                                         HTLCInitiator::RemoteOffered => context.next_counterparty_htlc_id + 1,
2998                                 },
2999                                 feerate: context.feerate_per_kw,
3000                         };
3001                         *context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = Some(commitment_tx_info);
3002                 }
3003                 res
3004         }
3005
3006         fn if_unbroadcasted_funding<F, O>(&self, f: F) -> Option<O> where F: Fn() -> Option<O> {
3007                 match self.channel_state {
3008                         ChannelState::FundingNegotiated => f(),
3009                         ChannelState::AwaitingChannelReady(flags) =>
3010                                 if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) ||
3011                                         flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into())
3012                                 {
3013                                         f()
3014                                 } else {
3015                                         None
3016                                 },
3017                         _ => None,
3018                 }
3019         }
3020
3021         /// Returns the transaction if there is a pending funding transaction that is yet to be
3022         /// broadcast.
3023         pub fn unbroadcasted_funding(&self) -> Option<Transaction> {
3024                 self.if_unbroadcasted_funding(|| self.funding_transaction.clone())
3025         }
3026
3027         /// Returns the transaction ID if there is a pending funding transaction that is yet to be
3028         /// broadcast.
3029         pub fn unbroadcasted_funding_txid(&self) -> Option<Txid> {
3030                 self.if_unbroadcasted_funding(||
3031                         self.channel_transaction_parameters.funding_outpoint.map(|txo| txo.txid)
3032                 )
3033         }
3034
3035         /// Returns whether the channel is funded in a batch.
3036         pub fn is_batch_funding(&self) -> bool {
3037                 self.is_batch_funding.is_some()
3038         }
3039
3040         /// Returns the transaction ID if there is a pending batch funding transaction that is yet to be
3041         /// broadcast.
3042         pub fn unbroadcasted_batch_funding_txid(&self) -> Option<Txid> {
3043                 self.unbroadcasted_funding_txid().filter(|_| self.is_batch_funding())
3044         }
3045
3046         /// Gets the latest commitment transaction and any dependent transactions for relay (forcing
3047         /// shutdown of this channel - no more calls into this Channel may be made afterwards except
3048         /// those explicitly stated to be allowed after shutdown completes, e.g. some simple getters).
3049         /// Also returns the list of payment_hashes for HTLCs which we can safely fail backwards
3050         /// immediately (others we will have to allow to time out).
3051         pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult {
3052                 // Note that we MUST only generate a monitor update that indicates force-closure - we're
3053                 // called during initialization prior to the chain_monitor in the encompassing ChannelManager
3054         // being fully configured in some cases. Thus, it's likely any monitor events we generate will
3055                 // be delayed in being processed! See the docs for `ChannelManagerReadArgs` for more.
3056                 assert!(!matches!(self.channel_state, ChannelState::ShutdownComplete));
3057
3058                 // We go ahead and "free" any holding cell HTLCs or HTLCs we haven't yet committed to and
3059                 // return them to fail the payment.
3060                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.holding_cell_htlc_updates.len());
3061                 let counterparty_node_id = self.get_counterparty_node_id();
3062                 for htlc_update in self.holding_cell_htlc_updates.drain(..) {
3063                         match htlc_update {
3064                                 HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. } => {
3065                                         dropped_outbound_htlcs.push((source, payment_hash, counterparty_node_id, self.channel_id));
3066                                 },
3067                                 _ => {}
3068                         }
3069                 }
3070                 let monitor_update = if let Some(funding_txo) = self.get_funding_txo() {
3071                         // If we haven't yet exchanged funding signatures (i.e. channel_state < AwaitingChannelReady),
3072                         // returning a channel monitor update here would imply a channel monitor update before
3073                         // we even registered the channel monitor to begin with, which is invalid.
3074                         // Thus, if we aren't actually at a point where we could conceivably broadcast the
3075                         // funding transaction, don't return a funding txo (which prevents providing the
3076                         // monitor update to the user, even if we return one).
3077                         // See test_duplicate_chan_id and test_pre_lockin_no_chan_closed_update for more.
3078                         if !self.channel_state.is_pre_funded_state() {
3079                                 self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID;
3080                                 Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate {
3081                                         update_id: self.latest_monitor_update_id,
3082                                         counterparty_node_id: Some(self.counterparty_node_id),
3083                                         updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }],
3084                                         channel_id: Some(self.channel_id()),
3085                                 }))
3086                         } else { None }
3087                 } else { None };
3088                 let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid();
3089                 let unbroadcasted_funding_tx = self.unbroadcasted_funding();
3090
3091                 self.channel_state = ChannelState::ShutdownComplete;
3092                 self.update_time_counter += 1;
3093                 ShutdownResult {
3094                         closure_reason,
3095                         monitor_update,
3096                         dropped_outbound_htlcs,
3097                         unbroadcasted_batch_funding_txid,
3098                         channel_id: self.channel_id,
3099                         user_channel_id: self.user_id,
3100                         channel_capacity_satoshis: self.channel_value_satoshis,
3101                         counterparty_node_id: self.counterparty_node_id,
3102                         unbroadcasted_funding_tx,
3103                         channel_funding_txo: self.get_funding_txo(),
3104                 }
3105         }
3106
3107         /// Only allowed after [`Self::channel_transaction_parameters`] is set.
3108         fn get_funding_signed_msg<L: Deref>(&mut self, logger: &L) -> (CommitmentTransaction, Option<msgs::FundingSigned>) where L::Target: Logger {
3109                 let counterparty_keys = self.build_remote_transaction_keys();
3110                 let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number + 1, &counterparty_keys, false, false, logger).tx;
3111
3112                 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
3113                 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
3114                 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
3115                         &self.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
3116
3117                 match &self.holder_signer {
3118                         // TODO (arik): move match into calling method for Taproot
3119                         ChannelSignerType::Ecdsa(ecdsa) => {
3120                                 let funding_signed = ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx)
3121                                         .map(|(signature, _)| msgs::FundingSigned {
3122                                                 channel_id: self.channel_id(),
3123                                                 signature,
3124                                                 #[cfg(taproot)]
3125                                                 partial_signature_with_nonce: None,
3126                                         })
3127                                         .ok();
3128
3129                                 if funding_signed.is_none() {
3130                                         #[cfg(not(async_signing))] {
3131                                                 panic!("Failed to get signature for funding_signed");
3132                                         }
3133                                         #[cfg(async_signing)] {
3134                                                 log_trace!(logger, "Counterparty commitment signature not available for funding_signed message; setting signer_pending_funding");
3135                                                 self.signer_pending_funding = true;
3136                                         }
3137                                 } else if self.signer_pending_funding {
3138                                         log_trace!(logger, "Counterparty commitment signature available for funding_signed message; clearing signer_pending_funding");
3139                                         self.signer_pending_funding = false;
3140                                 }
3141
3142                                 // We sign the "counterparty" commitment transaction, allowing them to broadcast the tx if they wish.
3143                                 (counterparty_initial_commitment_tx, funding_signed)
3144                         },
3145                         // TODO (taproot|arik)
3146                         #[cfg(taproot)]
3147                         _ => todo!()
3148                 }
3149         }
3150 }
3151
3152 // Internal utility functions for channels
3153
3154 /// Returns the value to use for `holder_max_htlc_value_in_flight_msat` as a percentage of the
3155 /// `channel_value_satoshis`, expressed in msat, set through
3156 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]
3157 ///
3158 /// The effective percentage is lower bounded by 1% and upper bounded by 100%.
3159 ///
3160 /// [`ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel`]: crate::util::config::ChannelHandshakeConfig::max_inbound_htlc_value_in_flight_percent_of_channel
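///
/// A hypothetical worked example (marked `ignore` since this is a private helper, and the
/// `config` shown is an assumption):
///
/// ```ignore
/// // With a 1_000_000 sat channel and max_inbound_htlc_value_in_flight_percent_of_channel
/// // set to 10, the cap is 1_000_000 * 10 * 10 = 100_000_000 msat (10% of the channel).
/// assert_eq!(get_holder_max_htlc_value_in_flight_msat(1_000_000, &config), 100_000_000);
/// ```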
3161 fn get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis: u64, config: &ChannelHandshakeConfig) -> u64 {
3162         let configured_percent = if config.max_inbound_htlc_value_in_flight_percent_of_channel < 1 {
3163                 1
3164         } else if config.max_inbound_htlc_value_in_flight_percent_of_channel > 100 {
3165                 100
3166         } else {
3167                 config.max_inbound_htlc_value_in_flight_percent_of_channel as u64
3168         };
3169         channel_value_satoshis * 10 * configured_percent
3170 }
3171
3172 /// Returns a minimum channel reserve value the remote needs to maintain,
3173 /// required by us according to the configured or default
3174 /// [`ChannelHandshakeConfig::their_channel_reserve_proportional_millionths`]
3175 ///
3176 /// Guaranteed to return a value no larger than channel_value_satoshis
3177 ///
3178 /// This is used both for outbound and inbound channels and has lower bound
3179 /// of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
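///
/// A hypothetical worked example (the `config` shown is an assumption, and the result is
/// assumed to exceed `MIN_THEIR_CHAN_RESERVE_SATOSHIS`):
///
/// ```ignore
/// // With their_channel_reserve_proportional_millionths = 10_000 (i.e. 1%), a 1_000_000 sat
/// // channel requires the remote to maintain 1_000_000 * 10_000 / 1_000_000 = 10_000 sat.
/// assert_eq!(get_holder_selected_channel_reserve_satoshis(1_000_000, &config), 10_000);
/// ```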
3180 pub(crate) fn get_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64, config: &UserConfig) -> u64 {
3181         let calculated_reserve = channel_value_satoshis.saturating_mul(config.channel_handshake_config.their_channel_reserve_proportional_millionths as u64) / 1_000_000;
3182         cmp::min(channel_value_satoshis, cmp::max(calculated_reserve, MIN_THEIR_CHAN_RESERVE_SATOSHIS))
3183 }
3184
3185 /// This is for legacy reasons, present for forward-compatibility.
3186 /// LDK versions older than 0.0.104 don't know how to read/handle values other than default
3187 /// from storage. Hence, we use this function to not persist default values of
3188 /// `holder_selected_channel_reserve_satoshis` for channels into storage.
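///
/// For example, a 1_000_000 sat channel yields `max(1_000_000 / 100, 1000) = 10_000` sat,
/// while a 50_000 sat channel is floored at 1_000 sat.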
3189 pub(crate) fn get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis: u64) -> u64 {
3190         let (q, _) = channel_value_satoshis.overflowing_div(100);
3191         cmp::min(channel_value_satoshis, cmp::max(q, 1000))
3192 }
3193
3194 // Get the fee cost in SATS of a commitment tx with a given number of HTLC outputs.
3195 // Note that num_htlcs should not include dust HTLCs.
3196 #[inline]
3197 fn commit_tx_fee_sat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3198         feerate_per_kw as u64 * (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000
3199 }
3200
3201 // Get the fee cost in MSATS of a commitment tx with a given number of HTLC outputs.
3202 // Note that num_htlcs should not include dust HTLCs.
3203 pub(crate) fn commit_tx_fee_msat(feerate_per_kw: u32, num_htlcs: usize, channel_type_features: &ChannelTypeFeatures) -> u64 {
3204         // Note that we need to divide before multiplying to round properly,
3205         // since the lowest denomination of bitcoin on-chain is the satoshi.
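        // As a hypothetical example: with a total weight of 724 and a feerate of 253 sat/kW,
        // 724 * 253 / 1000 = 183 sat (integer division), i.e. 183_000 msat. Multiplying out in
        // msat without the intermediate division would instead yield 183_172 msat, which is not
        // a whole number of satoshis.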
3206         (commitment_tx_base_weight(channel_type_features) + num_htlcs as u64 * COMMITMENT_TX_WEIGHT_PER_HTLC) * feerate_per_kw as u64 / 1000 * 1000
3207 }
3208
3209 /// Context for dual-funded channels.
3210 #[cfg(dual_funding)]
3211 pub(super) struct DualFundingChannelContext {
3212         /// The amount in satoshis we will be contributing to the channel.
3213         pub our_funding_satoshis: u64,
3214         /// The amount in satoshis our counterparty will be contributing to the channel.
3215         pub their_funding_satoshis: u64,
3216         /// The funding transaction locktime suggested by the initiator. If set by us, it is always set
3217         /// to the current block height to align incentives against fee-sniping.
3218         pub funding_tx_locktime: u32,
3219         /// The feerate set by the initiator to be used for the funding transaction.
3220         pub funding_feerate_sat_per_1000_weight: u32,
3221 }
3222
3223 // Holder designates channel data owned for the benefit of the user client.
3224 // Counterparty designates channel data owned by the other channel participant entity.
3225 pub(super) struct Channel<SP: Deref> where SP::Target: SignerProvider {
3226         pub context: ChannelContext<SP>,
3227 }
3228
3229 #[cfg(any(test, fuzzing))]
3230 struct CommitmentTxInfoCached {
3231         fee: u64,
3232         total_pending_htlcs: usize,
3233         next_holder_htlc_id: u64,
3234         next_counterparty_htlc_id: u64,
3235         feerate: u32,
3236 }
3237
3238 /// Contents of a wire message that fails an HTLC backwards. Useful for [`Channel::fail_htlc`] to
3239 /// fail with either [`msgs::UpdateFailMalformedHTLC`] or [`msgs::UpdateFailHTLC`] as needed.
3240 trait FailHTLCContents {
3241         type Message: FailHTLCMessageName;
3242         fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message;
3243         fn to_inbound_htlc_state(self) -> InboundHTLCState;
3244         fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK;
3245 }
3246 impl FailHTLCContents for msgs::OnionErrorPacket {
3247         type Message = msgs::UpdateFailHTLC;
3248         fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3249                 msgs::UpdateFailHTLC { htlc_id, channel_id, reason: self }
3250         }
3251         fn to_inbound_htlc_state(self) -> InboundHTLCState {
3252                 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(self))
3253         }
3254         fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3255                 HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet: self }
3256         }
3257 }
3258 impl FailHTLCContents for ([u8; 32], u16) {
3259         type Message = msgs::UpdateFailMalformedHTLC;
3260         fn to_message(self, htlc_id: u64, channel_id: ChannelId) -> Self::Message {
3261                 msgs::UpdateFailMalformedHTLC {
3262                         htlc_id,
3263                         channel_id,
3264                         sha256_of_onion: self.0,
3265                         failure_code: self.1
3266                 }
3267         }
3268         fn to_inbound_htlc_state(self) -> InboundHTLCState {
3269                 InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed(self))
3270         }
3271         fn to_htlc_update_awaiting_ack(self, htlc_id: u64) -> HTLCUpdateAwaitingACK {
3272                 HTLCUpdateAwaitingACK::FailMalformedHTLC {
3273                         htlc_id,
3274                         sha256_of_onion: self.0,
3275                         failure_code: self.1
3276                 }
3277         }
3278 }
3279
3280 trait FailHTLCMessageName {
3281         fn name() -> &'static str;
3282 }
3283 impl FailHTLCMessageName for msgs::UpdateFailHTLC {
3284         fn name() -> &'static str {
3285                 "update_fail_htlc"
3286         }
3287 }
3288 impl FailHTLCMessageName for msgs::UpdateFailMalformedHTLC {
3289         fn name() -> &'static str {
3290                 "update_fail_malformed_htlc"
3291         }
3292 }
3293
3294 impl<SP: Deref> Channel<SP> where
3295         SP::Target: SignerProvider,
3296         <SP::Target as SignerProvider>::EcdsaSigner: WriteableEcdsaChannelSigner
3297 {
3298         fn check_remote_fee<F: Deref, L: Deref>(
3299                 channel_type: &ChannelTypeFeatures, fee_estimator: &LowerBoundedFeeEstimator<F>,
3300                 feerate_per_kw: u32, cur_feerate_per_kw: Option<u32>, logger: &L
3301         ) -> Result<(), ChannelError> where F::Target: FeeEstimator, L::Target: Logger,
3302         {
3303                 let lower_limit_conf_target = if channel_type.supports_anchors_zero_fee_htlc_tx() {
3304                         ConfirmationTarget::MinAllowedAnchorChannelRemoteFee
3305                 } else {
3306                         ConfirmationTarget::MinAllowedNonAnchorChannelRemoteFee
3307                 };
3308                 let lower_limit = fee_estimator.bounded_sat_per_1000_weight(lower_limit_conf_target);
3309                 if feerate_per_kw < lower_limit {
3310                         if let Some(cur_feerate) = cur_feerate_per_kw {
3311                                 if feerate_per_kw > cur_feerate {
3312                                         log_warn!(logger,
3313                                                 "Accepting feerate that may prevent us from closing this channel because it's higher than what we have now. Had {} s/kW, now {} s/kW.",
3314                                                 cur_feerate, feerate_per_kw);
3315                                         return Ok(());
3316                                 }
3317                         }
3318                         return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit)));
3319                 }
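                // As a hypothetical sketch of the rule above: if our estimator's minimum allowed
                // remote feerate is 253 sat/kW, a peer-proposed 200 sat/kW is rejected, unless
                // the channel currently sits at, say, 150 sat/kW, in which case we accept the
                // raise toward our minimum rather than risk being unable to close at all.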
3320                 Ok(())
3321         }
3322
3323         #[inline]
3324         fn get_closing_scriptpubkey(&self) -> ScriptBuf {
3325                 // The shutdown scriptpubkey is set on channel opening when option_upfront_shutdown_script
3326                 // is signaled. Otherwise, it is set when sending a shutdown message. Calling this method
3327                 // outside of those situations will fail.
3328                 self.context.shutdown_scriptpubkey.clone().unwrap().into_inner()
3329         }
3330
3331         #[inline]
3332         fn get_closing_transaction_weight(&self, a_scriptpubkey: Option<&Script>, b_scriptpubkey: Option<&Script>) -> u64 {
3333                 let mut ret =
3334                 (4 +                                                   // version
3335                  1 +                                                   // input count
3336                  36 +                                                  // prevout
3337                  1 +                                                   // script length (0)
3338                  4 +                                                   // sequence
3339                  1 +                                                   // output count
3340                  4                                                     // lock time
3341                  )*4 +                                                 // * 4 for non-witness parts
3342                 2 +                                                    // witness marker and flag
3343                 1 +                                                    // witness element count
3344                 4 +                                                    // 4 element lengths (2 sigs, multisig dummy, and witness script)
3345                 self.context.get_funding_redeemscript().len() as u64 + // funding witness script
3346                 2*(1 + 71);                                            // two signatures + sighash type flags
3347                 if let Some(spk) = a_scriptpubkey {
3348                         ret += ((8+1) +                                    // output values and script length
3349                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
3350                 }
3351                 if let Some(spk) = b_scriptpubkey {
3352                         ret += ((8+1) +                                    // output values and script length
3353                                 spk.len() as u64) * 4;                         // scriptpubkey and witness multiplier
3354                 }
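                // As a hypothetical worked example: with a 71-byte funding redeemscript and two
                // P2WPKH outputs (22-byte scriptpubkeys), the total is (4+1+36+1+4+1+4)*4 = 204
                // weight for the non-witness parts, 2 + 1 + 4 + 71 + 2*(1 + 71) = 222 weight for
                // the witness, and (8+1+22)*4 = 124 weight per output, or 674 weight in total.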
3355                 ret
3356         }
3357
3358         #[inline]
3359         fn build_closing_transaction(&self, proposed_total_fee_satoshis: u64, skip_remote_output: bool) -> (ClosingTransaction, u64) {
3360                 assert!(self.context.pending_inbound_htlcs.is_empty());
3361                 assert!(self.context.pending_outbound_htlcs.is_empty());
3362                 assert!(self.context.pending_update_fee.is_none());
3363
3364                 let mut total_fee_satoshis = proposed_total_fee_satoshis;
3365                 let mut value_to_holder: i64 = (self.context.value_to_self_msat as i64) / 1000 - if self.context.is_outbound() { total_fee_satoshis as i64 } else { 0 };
3366                 let mut value_to_counterparty: i64 = ((self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat) as i64 / 1000) - if self.context.is_outbound() { 0 } else { total_fee_satoshis as i64 };
3367
3368                 if value_to_holder < 0 {
3369                         assert!(self.context.is_outbound());
3370                         total_fee_satoshis += (-value_to_holder) as u64;
3371                 } else if value_to_counterparty < 0 {
3372                         assert!(!self.context.is_outbound());
3373                         total_fee_satoshis += (-value_to_counterparty) as u64;
3374                 }
3375
3376                 if skip_remote_output || value_to_counterparty as u64 <= self.context.holder_dust_limit_satoshis {
3377                         value_to_counterparty = 0;
3378                 }
3379
3380                 if value_to_holder as u64 <= self.context.holder_dust_limit_satoshis {
3381                         value_to_holder = 0;
3382                 }
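                // As a hypothetical sketch of the fee handling above: if we are the funder with
                // 50_000 sat to ourselves and propose a 1_000 sat fee, our output becomes
                // 49_000 sat while the counterparty's balance is untouched; any output at or
                // below `holder_dust_limit_satoshis` is then set to zero rather than created.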
3383
3384                 assert!(self.context.shutdown_scriptpubkey.is_some());
3385                 let holder_shutdown_script = self.get_closing_scriptpubkey();
3386                 let counterparty_shutdown_script = self.context.counterparty_shutdown_scriptpubkey.clone().unwrap();
3387                 let funding_outpoint = self.funding_outpoint().into_bitcoin_outpoint();
3388
3389                 let closing_transaction = ClosingTransaction::new(value_to_holder as u64, value_to_counterparty as u64, holder_shutdown_script, counterparty_shutdown_script, funding_outpoint);
3390                 (closing_transaction, total_fee_satoshis)
3391         }
3392
3393         fn funding_outpoint(&self) -> OutPoint {
3394                 self.context.channel_transaction_parameters.funding_outpoint.unwrap()
3395         }
3396
3397         /// Claims an HTLC while we're disconnected from a peer, dropping the [`ChannelMonitorUpdate`]
3398         /// entirely.
3399         ///
3400         /// The [`ChannelMonitor`] for this channel MUST be updated out-of-band with the preimage
3401         /// provided (i.e. without calling [`crate::chain::Watch::update_channel`]).
3402         ///
3403         /// The HTLC claim will end up in the holding cell (because the caller must ensure the peer is
3404         /// disconnected).
3405         pub fn claim_htlc_while_disconnected_dropping_mon_update<L: Deref>
3406                 (&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L)
3407         where L::Target: Logger {
3408                 // Assert that we'll add the HTLC claim to the holding cell in `get_update_fulfill_htlc`
3409                 // (see equivalent if condition there).
3410                 assert!(!self.context.channel_state.can_generate_new_commitment());
3411                 let mon_update_id = self.context.latest_monitor_update_id; // Forget the ChannelMonitor update
3412                 let fulfill_resp = self.get_update_fulfill_htlc(htlc_id_arg, payment_preimage_arg, logger);
3413                 self.context.latest_monitor_update_id = mon_update_id;
3414                 if let UpdateFulfillFetch::NewClaim { msg, .. } = fulfill_resp {
3415                         assert!(msg.is_none()); // The HTLC must have ended up in the holding cell.
3416                 }
3417         }
3418
3419         fn get_update_fulfill_htlc<L: Deref>(&mut self, htlc_id_arg: u64, payment_preimage_arg: PaymentPreimage, logger: &L) -> UpdateFulfillFetch where L::Target: Logger {
3420                 // Either ChannelReady got set (which means it won't be unset) or there is no way any
3421                 // caller thought we could have something claimed (cause we wouldn't have accepted an
3422                 // incoming HTLC anyway). If we got to ShutdownComplete, callers aren't allowed to call us,
3423                 // either.
3424                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3425                         panic!("Was asked to fulfill an HTLC when channel was not in an operational state");
3426                 }
3427
3428                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3429                 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
3430                 // these, but for now we just have to treat them as normal.
3431
3432                 let mut pending_idx = core::usize::MAX;
3433                 let mut htlc_value_msat = 0;
3434                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3435                         if htlc.htlc_id == htlc_id_arg {
3436                                 debug_assert_eq!(htlc.payment_hash, PaymentHash(Sha256::hash(&payment_preimage_arg.0[..]).to_byte_array()));
3437                                 log_debug!(logger, "Claiming inbound HTLC id {} with payment hash {} with preimage {}",
3438                                         htlc.htlc_id, htlc.payment_hash, payment_preimage_arg);
3439                                 match htlc.state {
3440                                         InboundHTLCState::Committed => {},
3441                                         InboundHTLCState::LocalRemoved(ref reason) => {
3442                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3443                                                 } else {
3444                                                         log_warn!(logger, "Have preimage and want to fulfill HTLC with payment hash {} we already failed against channel {}", &htlc.payment_hash, &self.context.channel_id());
3445                                                         debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3446                                                 }
3447                                                 return UpdateFulfillFetch::DuplicateClaim {};
3448                                         },
3449                                         _ => {
3450                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3451                                                 // Don't return in release mode here so that we can update channel_monitor
3452                                         }
3453                                 }
3454                                 pending_idx = idx;
3455                                 htlc_value_msat = htlc.amount_msat;
3456                                 break;
3457                         }
3458                 }
3459                 if pending_idx == core::usize::MAX {
3460                         #[cfg(any(test, fuzzing))]
3461                         // If we failed to find an HTLC to fulfill, make sure it was previously fulfilled and
3462                         // this is simply a duplicate claim, not previously failed and we lost funds.
3463                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3464                         return UpdateFulfillFetch::DuplicateClaim {};
3465                 }
3466
3467                 // Now update local state:
3468                 //
3469                 // We have to put the payment_preimage in the channel_monitor right away here to ensure we
3470                 // can claim it even if the channel hits the chain before we see their next commitment.
3471                 self.context.latest_monitor_update_id += 1;
3472                 let monitor_update = ChannelMonitorUpdate {
3473                         update_id: self.context.latest_monitor_update_id,
3474                         counterparty_node_id: Some(self.context.counterparty_node_id),
3475                         updates: vec![ChannelMonitorUpdateStep::PaymentPreimage {
3476                                 payment_preimage: payment_preimage_arg.clone(),
3477                         }],
3478                         channel_id: Some(self.context.channel_id()),
3479                 };
3480
3481                 if !self.context.channel_state.can_generate_new_commitment() {
3482                         // Note that this condition is the same as the assertion in
3483                         // `claim_htlc_while_disconnected_dropping_mon_update` and must match exactly -
3484                         // `claim_htlc_while_disconnected_dropping_mon_update` would not work correctly if we
3485                         // do not get into this branch.
3486                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
3487                                 match pending_update {
3488                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
3489                                                 if htlc_id_arg == htlc_id {
3490                                                         // Make sure we don't leave latest_monitor_update_id incremented here:
3491                                                         self.context.latest_monitor_update_id -= 1;
3492                                                         #[cfg(any(test, fuzzing))]
3493                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3494                                                         return UpdateFulfillFetch::DuplicateClaim {};
3495                                                 }
3496                                         },
3497                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
3498                                                 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
3499                                         {
3500                                                 if htlc_id_arg == htlc_id {
3501                                                         log_warn!(logger, "Have preimage and want to fulfill HTLC with pending failure against channel {}", &self.context.channel_id());
3502                                                         // TODO: We may actually be able to switch to a fulfill here, though its
3503                                                         // rare enough it may not be worth the complexity burden.
3504                                                         debug_assert!(false, "Tried to fulfill an HTLC that was already failed");
3505                                                         return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3506                                                 }
3507                                         },
3508                                         _ => {}
3509                                 }
3510                         }
3511                         log_trace!(logger, "Adding HTLC claim to holding_cell in channel {}! Current state: {}", &self.context.channel_id(), self.context.channel_state.to_u32());
3512                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::ClaimHTLC {
3513                                 payment_preimage: payment_preimage_arg, htlc_id: htlc_id_arg,
3514                         });
3515                         #[cfg(any(test, fuzzing))]
3516                         self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3517                         return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3518                 }
3519                 #[cfg(any(test, fuzzing))]
3520                 self.context.historical_inbound_htlc_fulfills.insert(htlc_id_arg);
3521
3522                 {
3523                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
3524                         if let InboundHTLCState::Committed = htlc.state {
3525                         } else {
3526                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3527                                 return UpdateFulfillFetch::NewClaim { monitor_update, htlc_value_msat, msg: None };
3528                         }
3529                         log_trace!(logger, "Upgrading HTLC {} to LocalRemoved with a Fulfill in channel {}!", &htlc.payment_hash, &self.context.channel_id);
3530                         htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::Fulfill(payment_preimage_arg.clone()));
3531                 }
3532
3533                 UpdateFulfillFetch::NewClaim {
3534                         monitor_update,
3535                         htlc_value_msat,
3536                         msg: Some(msgs::UpdateFulfillHTLC {
3537                                 channel_id: self.context.channel_id(),
3538                                 htlc_id: htlc_id_arg,
3539                                 payment_preimage: payment_preimage_arg,
3540                         }),
3541                 }
3542         }
3543
3544         pub fn get_update_fulfill_htlc_and_commit<L: Deref>(&mut self, htlc_id: u64, payment_preimage: PaymentPreimage, logger: &L) -> UpdateFulfillCommitFetch where L::Target: Logger {
3545                 let release_cs_monitor = self.context.blocked_monitor_updates.is_empty();
3546                 match self.get_update_fulfill_htlc(htlc_id, payment_preimage, logger) {
3547                         UpdateFulfillFetch::NewClaim { mut monitor_update, htlc_value_msat, msg } => {
3548                                 // Even if we aren't supposed to let new monitor updates with commitment state
3549                                 // updates run, we still need to push the preimage ChannelMonitorUpdateStep no
3550                                 // matter what. Sadly, to push a new monitor update which flies before others
3551                                 // already queued, we have to insert it into the pending queue and update the
3552                                 // update_ids of all the following monitors.
3553                                 if release_cs_monitor && msg.is_some() {
3554                                         let mut additional_update = self.build_commitment_no_status_check(logger);
3555                                         // build_commitment_no_status_check may bump latest_monitor_update_id but we want them
3556                                         // to be strictly increasing by one, so decrement it here.
3557                                         self.context.latest_monitor_update_id = monitor_update.update_id;
3558                                         monitor_update.updates.append(&mut additional_update.updates);
3559                                 } else {
3560                                         let new_mon_id = self.context.blocked_monitor_updates.get(0)
3561                                                 .map(|upd| upd.update.update_id).unwrap_or(monitor_update.update_id);
3562                                         monitor_update.update_id = new_mon_id;
3563                                         for held_update in self.context.blocked_monitor_updates.iter_mut() {
3564                                                 held_update.update.update_id += 1;
3565                                         }
3566                                         if msg.is_some() {
3567                                                 debug_assert!(false, "If there is a pending blocked monitor we should have MonitorUpdateInProgress set");
3568                                                 let update = self.build_commitment_no_status_check(logger);
3569                                                 self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
3570                                                         update,
3571                                                 });
3572                                         }
3573                                 }
3574
3575                                 self.monitor_updating_paused(false, msg.is_some(), false, Vec::new(), Vec::new(), Vec::new());
3576                                 UpdateFulfillCommitFetch::NewClaim { monitor_update, htlc_value_msat, }
3577                         },
3578                         UpdateFulfillFetch::DuplicateClaim {} => UpdateFulfillCommitFetch::DuplicateClaim {},
3579                 }
3580         }
3581
3582         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3583         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3584         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3585         /// before we fail backwards.
3586         ///
3587         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(())`. Thus, this will always
3588         /// return `Ok(())` if preconditions are met. In any case, `Err`s will only be
3589         /// [`ChannelError::Ignore`].
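             ///
             /// A usage sketch (hypothetical `chan`, `err_packet`, and `logger` bindings; not
             /// taken from a real call site):
             ///
             /// ```ignore
             /// match chan.queue_fail_htlc(42, err_packet, &logger) {
             ///     Ok(()) => {}, // The failure now sits in the holding cell.
             ///     Err(ChannelError::Ignore(msg)) => println!("Ignoring failure: {}", msg),
             ///     Err(_) => unreachable!("Errors here are documented to be Ignore-only"),
             /// }
             /// ```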
3590         pub fn queue_fail_htlc<L: Deref>(&mut self, htlc_id_arg: u64, err_packet: msgs::OnionErrorPacket, logger: &L)
3591         -> Result<(), ChannelError> where L::Target: Logger {
3592                 self.fail_htlc(htlc_id_arg, err_packet, true, logger)
3593                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
3594         }
3595
3596         /// Used for failing back with [`msgs::UpdateFailMalformedHTLC`]. For now, this is used when we
3597         /// want to fail blinded HTLCs where we are not the intro node.
3598         ///
3599         /// See [`Self::queue_fail_htlc`] for more info.
3600         pub fn queue_fail_malformed_htlc<L: Deref>(
3601                 &mut self, htlc_id_arg: u64, failure_code: u16, sha256_of_onion: [u8; 32], logger: &L
3602         ) -> Result<(), ChannelError> where L::Target: Logger {
3603                 self.fail_htlc(htlc_id_arg, (sha256_of_onion, failure_code), true, logger)
3604                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
3605         }
3606
3607         /// We can only have one resolution per HTLC. In some cases around reconnect, we may fulfill
3608         /// an HTLC more than once or fulfill once and then attempt to fail after reconnect. We cannot,
3609         /// however, fail more than once as we wait for an upstream failure to be irrevocably committed
3610         /// before we fail backwards.
3611         ///
3612         /// If we do fail twice, we `debug_assert!(false)` and return `Ok(None)`. Thus, this will always
3613         /// return `Ok(_)` if preconditions are met. In any case, `Err`s will only be
3614         /// [`ChannelError::Ignore`].
3615         fn fail_htlc<L: Deref, E: FailHTLCContents + Clone>(
3616                 &mut self, htlc_id_arg: u64, err_contents: E, mut force_holding_cell: bool,
3617                 logger: &L
3618         ) -> Result<Option<E::Message>, ChannelError> where L::Target: Logger {
3619                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3620                         panic!("Was asked to fail an HTLC when channel was not in an operational state");
3621                 }
3622
3623                 // ChannelManager may generate duplicate claims/fails due to HTLC update events from
3624                 // on-chain ChannelMonitors during block rescan. Ideally we'd figure out a way to drop
3625                 // these, but for now we just have to treat them as normal.
3626
3627                 let mut pending_idx = core::usize::MAX;
3628                 for (idx, htlc) in self.context.pending_inbound_htlcs.iter().enumerate() {
3629                         if htlc.htlc_id == htlc_id_arg {
3630                                 match htlc.state {
3631                                         InboundHTLCState::Committed => {},
3632                                         InboundHTLCState::LocalRemoved(ref reason) => {
3633                                                 if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
3634                                                 } else {
3635                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
3636                                                 }
3637                                                 return Ok(None);
3638                                         },
3639                                         _ => {
3640                                                 debug_assert!(false, "Have an inbound HTLC we tried to claim before it was fully committed to");
3641                                                 return Err(ChannelError::Ignore(format!("Unable to find a pending HTLC which matched the given HTLC ID ({})", htlc.htlc_id)));
3642                                         }
3643                                 }
3644                                 pending_idx = idx;
3645                         }
3646                 }
3647                 if pending_idx == core::usize::MAX {
3648                         #[cfg(any(test, fuzzing))]
3649                         // If we failed to find an HTLC to fail, make sure it was previously fulfilled and this
3650                         // is simply a duplicate fail, not a previously-failed HTLC that we failed back too early.
3651                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3652                         return Ok(None);
3653                 }
3654
3655                 if !self.context.channel_state.can_generate_new_commitment() {
3656                         debug_assert!(force_holding_cell, "!force_holding_cell is only called when emptying the holding cell, so we shouldn't end up back in it!");
3657                         force_holding_cell = true;
3658                 }
3659
3660                 // Now update local state:
3661                 if force_holding_cell {
3662                         for pending_update in self.context.holding_cell_htlc_updates.iter() {
3663                                 match pending_update {
3664                                         &HTLCUpdateAwaitingACK::ClaimHTLC { htlc_id, .. } => {
3665                                                 if htlc_id_arg == htlc_id {
3666                                                         #[cfg(any(test, fuzzing))]
3667                                                         debug_assert!(self.context.historical_inbound_htlc_fulfills.contains(&htlc_id_arg));
3668                                                         return Ok(None);
3669                                                 }
3670                                         },
3671                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, .. } |
3672                                                 &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, .. } =>
3673                                         {
3674                                                 if htlc_id_arg == htlc_id {
3675                                                         debug_assert!(false, "Tried to fail an HTLC that was already failed");
3676                                                         return Err(ChannelError::Ignore("Unable to find a pending HTLC which matched the given HTLC ID".to_owned()));
3677                                                 }
3678                                         },
3679                                         _ => {}
3680                                 }
3681                         }
3682                         log_trace!(logger, "Placing failure for HTLC ID {} in holding cell in channel {}.", htlc_id_arg, &self.context.channel_id());
3683                         self.context.holding_cell_htlc_updates.push(err_contents.to_htlc_update_awaiting_ack(htlc_id_arg));
3684                         return Ok(None);
3685                 }
3686
3687                 log_trace!(logger, "Failing HTLC ID {} back with {} message in channel {}.", htlc_id_arg,
3688                         E::Message::name(), &self.context.channel_id());
3689                 {
3690                         let htlc = &mut self.context.pending_inbound_htlcs[pending_idx];
3691                         htlc.state = err_contents.clone().to_inbound_htlc_state();
3692                 }
3693
3694                 Ok(Some(err_contents.to_message(htlc_id_arg, self.context.channel_id())))
3695         }
3696
3697         // Message handlers:
3698         /// Updates the state of the channel to indicate that all channels in the batch have received
3699         /// funding_signed and persisted their monitors.
3700         /// The funding transaction is consequently allowed to be broadcast, and the channel can be
3701         /// treated as a non-batch channel going forward.
3702         pub fn set_batch_ready(&mut self) {
3703                 self.context.is_batch_funding = None;
3704                 self.context.channel_state.clear_waiting_for_batch();
3705         }
3706
3707         /// Unsets the existing funding information.
3708         ///
3709         /// This must only be used if the channel has not yet completed funding and has not been used.
3710         ///
3711         /// Further, the channel must be immediately shut down after this with a call to
3712         /// [`ChannelContext::force_shutdown`].
3713         pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
3714                 debug_assert!(matches!(
3715                         self.context.channel_state, ChannelState::AwaitingChannelReady(_)
3716                 ));
3717                 self.context.channel_transaction_parameters.funding_outpoint = None;
3718                 self.context.channel_id = temporary_channel_id;
3719         }
3720
3721         /// Handles a channel_ready message from our peer. If we've already sent our channel_ready
3722         /// and the channel is now usable (and public), this may generate an announcement_signatures to
3723         /// reply with.
3724         pub fn channel_ready<NS: Deref, L: Deref>(
3725                 &mut self, msg: &msgs::ChannelReady, node_signer: &NS, chain_hash: ChainHash,
3726                 user_config: &UserConfig, best_block: &BestBlock, logger: &L
3727         ) -> Result<Option<msgs::AnnouncementSignatures>, ChannelError>
3728         where
3729                 NS::Target: NodeSigner,
3730                 L::Target: Logger
3731         {
3732                 if self.context.channel_state.is_peer_disconnected() {
3733                         self.context.workaround_lnd_bug_4006 = Some(msg.clone());
3734                         return Err(ChannelError::Ignore("Peer sent channel_ready when we needed a channel_reestablish. The peer is likely lnd, see https://github.com/lightningnetwork/lnd/issues/4006".to_owned()));
3735                 }
3736
3737                 if let Some(scid_alias) = msg.short_channel_id_alias {
3738                         if Some(scid_alias) != self.context.short_channel_id {
3739                                 // The scid alias provided can be used to route payments *from* our counterparty,
3740                                 // i.e. can be used for inbound payments and provided in invoices, but is not used
3741                                 // when routing outbound payments.
3742                                 self.context.latest_inbound_scid_alias = Some(scid_alias);
3743                         }
3744                 }
3745
3746                 // Our channel_ready shouldn't have been sent while we are still waiting for other channels
3747                 // in the batch, but we can still receive a channel_ready from our counterparty.
3748                 let mut check_reconnection = false;
3749                 match &self.context.channel_state {
3750                         ChannelState::AwaitingChannelReady(flags) => {
3751                                 let flags = flags.clone().clear(FundedStateFlags::ALL.into());
3752                                 debug_assert!(!flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY) || !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3753                                 if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY {
3754                                         // If we reconnected before sending our `channel_ready` they may still resend theirs.
3755                                         check_reconnection = true;
3756                                 } else if flags.clone().clear(AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty() {
3757                                         self.context.channel_state.set_their_channel_ready();
3758                                 } else if flags == AwaitingChannelReadyFlags::OUR_CHANNEL_READY {
3759                                         self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
3760                                         self.context.update_time_counter += 1;
3761                                 } else {
3762                                         // We're in `WAITING_FOR_BATCH`, so we should wait until we're ready.
3763                                         debug_assert!(flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));
3764                                 }
3765                         }
3766                         // If we reconnected before sending our `channel_ready` they may still resend theirs.
3767                         ChannelState::ChannelReady(_) => check_reconnection = true,
3768                         _ => return Err(ChannelError::Close("Peer sent a channel_ready at a strange time".to_owned())),
3769                 }
3770                 if check_reconnection {
3771                         // They probably disconnected/reconnected and re-sent the channel_ready, which is
3772                         // required, or they're sending a fresh SCID alias.
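                             // In short: the point in a re-sent channel_ready must match the one from their
                             // original channel_ready; depending on how far the commitment number has advanced,
                             // we either still hold that point directly (cur/prev) or re-derive it from the
                             // corresponding revealed secret below.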
3773                         let expected_point =
3774                                 if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
3775                                         // If they haven't ever sent an updated point, the point they send should match
3776                                         // the current one.
3777                                         self.context.counterparty_cur_commitment_point
3778                                 } else if self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 {
3779                                         // If we've advanced the commitment number once, the second commitment point is
3780                                         // at `counterparty_prev_commitment_point`, which is not yet revoked.
3781                                         debug_assert!(self.context.counterparty_prev_commitment_point.is_some());
3782                                         self.context.counterparty_prev_commitment_point
3783                                 } else {
3784                                         // If they have sent updated points, channel_ready is always supposed to match
3785                                         // their "first" point, which we re-derive here.
3786                                         Some(PublicKey::from_secret_key(&self.context.secp_ctx, &SecretKey::from_slice(
3787                                                         &self.context.commitment_secrets.get_secret(INITIAL_COMMITMENT_NUMBER - 1).expect("We should have all prev secrets available")
3788                                                 ).expect("We already advanced, so previous secret keys should have been validated already")))
3789                                 };
3790                         if expected_point != Some(msg.next_per_commitment_point) {
3791                                 return Err(ChannelError::Close("Peer sent a reconnect channel_ready with a different point".to_owned()));
3792                         }
3793                         return Ok(None);
3794                 }
3795
3796                 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
3797                 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
3798
3799                 log_info!(logger, "Received channel_ready from peer for channel {}", &self.context.channel_id());
3800
3801                 Ok(self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger))
3802         }
3803
3804         pub fn update_add_htlc<F, FE: Deref, L: Deref>(
3805                 &mut self, msg: &msgs::UpdateAddHTLC, mut pending_forward_status: PendingHTLCStatus,
3806                 create_pending_htlc_status: F, fee_estimator: &LowerBoundedFeeEstimator<FE>, logger: &L
3807         ) -> Result<(), ChannelError>
3808         where F: for<'a> Fn(&'a Self, PendingHTLCStatus, u16) -> PendingHTLCStatus,
3809                 FE::Target: FeeEstimator, L::Target: Logger,
3810         {
3811                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
3812                         return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3813                 }
3814                 // We can't accept HTLCs sent after we've sent a shutdown.
3815                 if self.context.channel_state.is_local_shutdown_sent() {
3816                         pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x4000|8);
3817                 }
3818                 // If the remote has sent a shutdown prior to adding this HTLC, then they are in violation of the spec.
3819                 if self.context.channel_state.is_remote_shutdown_sent() {
3820                         return Err(ChannelError::Close("Got add HTLC message when channel was not in an operational state".to_owned()));
3821                 }
3822                 if self.context.channel_state.is_peer_disconnected() {
3823                         return Err(ChannelError::Close("Peer sent update_add_htlc when we needed a channel_reestablish".to_owned()));
3824                 }
3825                 if msg.amount_msat > self.context.channel_value_satoshis * 1000 {
3826                         return Err(ChannelError::Close("Remote side tried to send more than the total value of the channel".to_owned()));
3827                 }
3828                 if msg.amount_msat == 0 {
3829                         return Err(ChannelError::Close("Remote side tried to send a 0-msat HTLC".to_owned()));
3830                 }
3831                 if msg.amount_msat < self.context.holder_htlc_minimum_msat {
3832                         return Err(ChannelError::Close(format!("Remote side tried to send less than our minimum HTLC value. Lower limit: ({}). Actual: ({})", self.context.holder_htlc_minimum_msat, msg.amount_msat)));
3833                 }
3834
3835                 let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
3836                 let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
3837                 if inbound_stats.pending_htlcs + 1 > self.context.holder_max_accepted_htlcs as u32 {
3838                         return Err(ChannelError::Close(format!("Remote tried to push more than our max accepted HTLCs ({})", self.context.holder_max_accepted_htlcs)));
3839                 }
3840                 if inbound_stats.pending_htlcs_value_msat + msg.amount_msat > self.context.holder_max_htlc_value_in_flight_msat {
3841                         return Err(ChannelError::Close(format!("Remote HTLC add would put them over our max HTLC value ({})", self.context.holder_max_htlc_value_in_flight_msat)));
3842                 }
3843
3844                 // Check holder_selected_channel_reserve_satoshis (we're getting paid, so they have to at
3845                 // least keep the reserve we told them to always hold as a direct payment, ensuring they
3846                 // lose something if we punish them for broadcasting an old state).
3847                 // Note that we don't really care about having a small (or no) to_remote output in our
3848                 // local commitment transactions, as the purpose of the channel reserve is to ensure we
3849                 // can punish *them* if they misbehave, so we discount any outbound HTLCs which will not
3850                 // be present in the next commitment transaction we send them (at least for fulfilled
3851                 // ones; failed ones won't modify value_to_self).
3852                 // Note that without this discount we would send HTLCs which another instance of
3853                 // rust-lightning would consider reserve-violating (as we forget inbound HTLCs from the
3854                 // Channel state once they will no longer be present in the next received commitment
3855                 // transaction).
3856                 let mut removed_outbound_total_msat = 0;
3857                 for ref htlc in self.context.pending_outbound_htlcs.iter() {
3858                         if let OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(_)) = htlc.state {
3859                                 removed_outbound_total_msat += htlc.amount_msat;
3860                         } else if let OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(_)) = htlc.state {
3861                                 removed_outbound_total_msat += htlc.amount_msat;
3862                         }
3863                 }
3864
3865                 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
3866                 let (htlc_timeout_dust_limit, htlc_success_dust_limit) = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3867                         (0, 0)
3868                 } else {
3869                         let dust_buffer_feerate = self.context.get_dust_buffer_feerate(None) as u64;
3870                         (dust_buffer_feerate * htlc_timeout_tx_weight(self.context.get_channel_type()) / 1000,
3871                                 dust_buffer_feerate * htlc_success_tx_weight(self.context.get_channel_type()) / 1000)
3872                 };
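                     // For intuition, a sketch with assumed figures (not constants defined here): at a
                     // dust buffer feerate of 2500 sat/kW and the usual non-anchor HTLC-timeout weight
                     // of 663 WU, htlc_timeout_dust_limit = 2500 * 663 / 1000 = 1657 sats, so any HTLC
                     // below that plus the counterparty's dust limit is trimmed to fees on their
                     // commitment transaction and counts towards our dust exposure.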
3873                 let exposure_dust_limit_timeout_sats = htlc_timeout_dust_limit + self.context.counterparty_dust_limit_satoshis;
3874                 if msg.amount_msat / 1000 < exposure_dust_limit_timeout_sats {
3875                         let on_counterparty_tx_dust_htlc_exposure_msat = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat + msg.amount_msat;
3876                         if on_counterparty_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3877                                 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on counterparty commitment tx",
3878                                         on_counterparty_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3879                                 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3880                         }
3881                 }
3882
3883                 let exposure_dust_limit_success_sats = htlc_success_dust_limit + self.context.holder_dust_limit_satoshis;
3884                 if msg.amount_msat / 1000 < exposure_dust_limit_success_sats {
3885                         let on_holder_tx_dust_htlc_exposure_msat = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat + msg.amount_msat;
3886                         if on_holder_tx_dust_htlc_exposure_msat > max_dust_htlc_exposure_msat {
3887                                 log_info!(logger, "Cannot accept value that would put our exposure to dust HTLCs at {} over the limit {} on holder commitment tx",
3888                                         on_holder_tx_dust_htlc_exposure_msat, max_dust_htlc_exposure_msat);
3889                                 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3890                         }
3891                 }
3892
3893                 let pending_value_to_self_msat =
3894                         self.context.value_to_self_msat + inbound_stats.pending_htlcs_value_msat - removed_outbound_total_msat;
3895                 let pending_remote_value_msat =
3896                         self.context.channel_value_satoshis * 1000 - pending_value_to_self_msat;
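                     // E.g. (illustrative figures only): on a 1_000_000 sat channel where our balance
                     // plus pending inbound HTLCs totals 600_000_000 msat, the remote has at most
                     // 400_000_000 msat to offer; a larger amount_msat would overdraw them.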
3897                 if pending_remote_value_msat < msg.amount_msat {
3898                         return Err(ChannelError::Close("Remote HTLC add would overdraw remaining funds".to_owned()));
3899                 }
3900
3901                 // Check that the remote can afford to pay for this HTLC on-chain at the current
3902                 // feerate_per_kw, while maintaining their channel reserve (as required by the spec).
3903                 {
3904                         let remote_commit_tx_fee_msat = if self.context.is_outbound() { 0 } else {
3905                                 let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3906                                 self.context.next_remote_commit_tx_fee_msat(htlc_candidate, None) // Don't include the extra fee spike buffer HTLC in calculations
3907                         };
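                             // Both anchors (ANCHOR_OUTPUT_VALUE_SATOSHI, 330 sats apiece under BOLT 3)
                             // come out of the funder's balance, so their value is deducted before
                             // checking that the remote can still cover the commitment fee and reserve.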
3908                         let anchor_outputs_value_msat = if !self.context.is_outbound() && self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3909                                 ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3910                         } else {
3911                                 0
3912                         };
3913                         if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(anchor_outputs_value_msat) < remote_commit_tx_fee_msat {
3914                                 return Err(ChannelError::Close("Remote HTLC add would not leave enough to pay for fees".to_owned()));
3915                         };
3916                         if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(remote_commit_tx_fee_msat).saturating_sub(anchor_outputs_value_msat) < self.context.holder_selected_channel_reserve_satoshis * 1000 {
3917                                 return Err(ChannelError::Close("Remote HTLC add would put them under remote reserve value".to_owned()));
3918                         }
3919                 }
3920
3921                 let anchor_outputs_value_msat = if self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3922                         ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000
3923                 } else {
3924                         0
3925                 };
3926                 if !self.context.is_outbound() {
3927                         // `Some(())` is for the fee spike buffer we keep for the remote. This deviates from
3928                         // the spec because the fee spike buffer requirement doesn't exist on the receiver's
3929                         // side, only on the sender's. Note that with anchor outputs we are no longer as
3930                         // sensitive to fee spikes, so we skip the fee spike buffer multiple below.
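                             // For illustration (hypothetical figures): if the projected commitment tx fee
                             // including this HTLC and the buffer HTLC were 5_000 msat, a non-anchor channel
                             // budgets FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE times that (10_000 msat at the
                             // current multiple of 2) before concluding the remote can afford the add.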
3931                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3932                         let mut remote_fee_cost_incl_stuck_buffer_msat = self.context.next_remote_commit_tx_fee_msat(htlc_candidate, Some(()));
3933                         if !self.context.get_channel_type().supports_anchors_zero_fee_htlc_tx() {
3934                                 remote_fee_cost_incl_stuck_buffer_msat *= FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
3935                         }
3936                         if pending_remote_value_msat.saturating_sub(msg.amount_msat).saturating_sub(self.context.holder_selected_channel_reserve_satoshis * 1000).saturating_sub(anchor_outputs_value_msat) < remote_fee_cost_incl_stuck_buffer_msat {
3937                                 // Note that if create_pending_htlc_status leaves pending_forward_status unchanged
3938                                 // here, it's because we're already failing the HTLC, i.e. its status is already set to failing.
3939                                 log_info!(logger, "Attempting to fail HTLC due to fee spike buffer violation in channel {}. Rebalancing is required.", &self.context.channel_id());
3940                                 pending_forward_status = create_pending_htlc_status(self, pending_forward_status, 0x1000|7);
3941                         }
3942                 } else {
3943                         // Check that they won't violate our local required channel reserve by adding this HTLC.
3944                         let htlc_candidate = HTLCCandidate::new(msg.amount_msat, HTLCInitiator::RemoteOffered);
3945                         let local_commit_tx_fee_msat = self.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
3946                         if self.context.value_to_self_msat < self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 + local_commit_tx_fee_msat + anchor_outputs_value_msat {
3947                                 return Err(ChannelError::Close("Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_owned()));
3948                         }
3949                 }
3950                 if self.context.next_counterparty_htlc_id != msg.htlc_id {
3951                         return Err(ChannelError::Close(format!("Remote skipped HTLC ID (skipped ID: {})", self.context.next_counterparty_htlc_id)));
3952                 }
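                     // Bitcoin's locktime convention treats values of 500_000_000 or more as UNIX
                     // timestamps rather than block heights, so such a cltv_expiry cannot be a sane
                     // block height.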
3953                 if msg.cltv_expiry >= 500000000 {
3954                         return Err(ChannelError::Close("Remote provided CLTV expiry in seconds instead of block height".to_owned()));
3955                 }
3956
3957                 if self.context.channel_state.is_local_shutdown_sent() {
3958                         if let PendingHTLCStatus::Forward(_) = pending_forward_status {
3959                                 panic!("ChannelManager shouldn't be trying to add a forwardable HTLC after we've started closing");
3960                         }
3961                 }
3962
3963                 // Now update local state:
3964                 self.context.next_counterparty_htlc_id += 1;
3965                 self.context.pending_inbound_htlcs.push(InboundHTLCOutput {
3966                         htlc_id: msg.htlc_id,
3967                         amount_msat: msg.amount_msat,
3968                         payment_hash: msg.payment_hash,
3969                         cltv_expiry: msg.cltv_expiry,
3970                         state: InboundHTLCState::RemoteAnnounced(pending_forward_status),
3971                 });
3972                 Ok(())
3973         }
3974
3975         /// Marks an outbound HTLC for which we have received an update_fail/fulfill/malformed message.
3976         #[inline]
3977         fn mark_outbound_htlc_removed(&mut self, htlc_id: u64, check_preimage: Option<PaymentPreimage>, fail_reason: Option<HTLCFailReason>) -> Result<&OutboundHTLCOutput, ChannelError> {
3978                 assert!(!(check_preimage.is_some() && fail_reason.is_some()), "cannot fail while we have a preimage");
3979                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
3980                         if htlc.htlc_id == htlc_id {
3981                                 let outcome = match check_preimage {
3982                                         None => fail_reason.into(),
3983                                         Some(payment_preimage) => {
3984                                                 let payment_hash = PaymentHash(Sha256::hash(&payment_preimage.0[..]).to_byte_array());
3985                                                 if payment_hash != htlc.payment_hash {
3986                                                         return Err(ChannelError::Close(format!("Remote tried to fulfill HTLC ({}) with an incorrect preimage", htlc_id)));
3987                                                 }
3988                                                 OutboundHTLCOutcome::Success(Some(payment_preimage))
3989                                         }
3990                                 };
3991                                 match htlc.state {
3992                                         OutboundHTLCState::LocalAnnounced(_) =>
3993                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) before it had been committed", htlc_id))),
3994                                         OutboundHTLCState::Committed => {
3995                                                 htlc.state = OutboundHTLCState::RemoteRemoved(outcome);
3996                                         },
3997                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(_) | OutboundHTLCState::AwaitingRemovedRemoteRevoke(_) | OutboundHTLCState::RemoteRemoved(_) =>
3998                                                 return Err(ChannelError::Close(format!("Remote tried to fulfill/fail HTLC ({}) that they'd already fulfilled/failed", htlc_id))),
3999                                 }
4000                                 return Ok(htlc);
4001                         }
4002                 }
4003                 Err(ChannelError::Close("Remote tried to fulfill/fail an HTLC we couldn't find".to_owned()))
4004         }
4005
4006         pub fn update_fulfill_htlc(&mut self, msg: &msgs::UpdateFulfillHTLC) -> Result<(HTLCSource, u64, Option<u64>), ChannelError> {
4007                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4008                         return Err(ChannelError::Close("Got fulfill HTLC message when channel was not in an operational state".to_owned()));
4009                 }
4010                 if self.context.channel_state.is_peer_disconnected() {
4011                         return Err(ChannelError::Close("Peer sent update_fulfill_htlc when we needed a channel_reestablish".to_owned()));
4012                 }
4013
4014                 self.mark_outbound_htlc_removed(msg.htlc_id, Some(msg.payment_preimage), None).map(|htlc| (htlc.source.clone(), htlc.amount_msat, htlc.skimmed_fee_msat))
4015         }
4016
4017         pub fn update_fail_htlc(&mut self, msg: &msgs::UpdateFailHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
4018                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4019                         return Err(ChannelError::Close("Got fail HTLC message when channel was not in an operational state".to_owned()));
4020                 }
4021                 if self.context.channel_state.is_peer_disconnected() {
4022                         return Err(ChannelError::Close("Peer sent update_fail_htlc when we needed a channel_reestablish".to_owned()));
4023                 }
4024
4025                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
4026                 Ok(())
4027         }
4028
4029         pub fn update_fail_malformed_htlc(&mut self, msg: &msgs::UpdateFailMalformedHTLC, fail_reason: HTLCFailReason) -> Result<(), ChannelError> {
4030                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4031                         return Err(ChannelError::Close("Got fail malformed HTLC message when channel was not in an operational state".to_owned()));
4032                 }
4033                 if self.context.channel_state.is_peer_disconnected() {
4034                         return Err(ChannelError::Close("Peer sent update_fail_malformed_htlc when we needed a channel_reestablish".to_owned()));
4035                 }
4036
4037                 self.mark_outbound_htlc_removed(msg.htlc_id, None, Some(fail_reason))?;
4038                 Ok(())
4039         }
4040
4041         pub fn commitment_signed<L: Deref>(&mut self, msg: &msgs::CommitmentSigned, logger: &L) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
4042                 where L::Target: Logger
4043         {
4044                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4045                         return Err(ChannelError::Close("Got commitment signed message when channel was not in an operational state".to_owned()));
4046                 }
4047                 if self.context.channel_state.is_peer_disconnected() {
4048                         return Err(ChannelError::Close("Peer sent commitment_signed when we needed a channel_reestablish".to_owned()));
4049                 }
4050                 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
4051                         return Err(ChannelError::Close("Peer sent commitment_signed after we'd started exchanging closing_signeds".to_owned()));
4052                 }
4053
4054                 let funding_script = self.context.get_funding_redeemscript();
4055
4056                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4057
4058                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger);
4059                 let commitment_txid = {
4060                         let trusted_tx = commitment_stats.tx.trust();
4061                         let bitcoin_tx = trusted_tx.built_transaction();
4062                         let sighash = bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
4063
4064                         log_trace!(logger, "Checking commitment tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}",
4065                                 log_bytes!(msg.signature.serialize_compact()[..]),
4066                                 log_bytes!(self.context.counterparty_funding_pubkey().serialize()), encode::serialize_hex(&bitcoin_tx.transaction),
4067                                 log_bytes!(sighash[..]), encode::serialize_hex(&funding_script), &self.context.channel_id());
4068                         if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.counterparty_funding_pubkey()) {
4069                                 return Err(ChannelError::Close("Invalid commitment tx signature from peer".to_owned()));
4070                         }
4071                         bitcoin_tx.txid
4072                 };
4073                 let mut htlcs_cloned: Vec<_> = commitment_stats.htlcs_included.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect();
4074
4075                 // If our counterparty updated the channel fee in this commitment transaction, check that
4076                 // they can actually afford the new fee now.
4077                 let update_fee = if let Some((_, update_state)) = self.context.pending_update_fee {
4078                         update_state == FeeUpdateState::RemoteAnnounced
4079                 } else { false };
4080                 if update_fee {
4081                         debug_assert!(!self.context.is_outbound());
4082                         let counterparty_reserve_we_require_msat = self.context.holder_selected_channel_reserve_satoshis * 1000;
4083                         if commitment_stats.remote_balance_msat < commitment_stats.total_fee_sat * 1000 + counterparty_reserve_we_require_msat {
4084                                 return Err(ChannelError::Close("Funding remote cannot afford proposed new fee".to_owned()));
4085                         }
4086                 }
4087                 #[cfg(any(test, fuzzing))]
4088                 {
4089                         if self.context.is_outbound() {
4090                                 let projected_commit_tx_info = self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap().take();
4091                                 *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
4092                                 if let Some(info) = projected_commit_tx_info {
4093                                         let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len()
4094                                                 + self.context.holding_cell_htlc_updates.len();
4095                                         if info.total_pending_htlcs == total_pending_htlcs
4096                                                 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
4097                                                 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
4098                                                 && info.feerate == self.context.feerate_per_kw {
4099                                                         assert_eq!(commitment_stats.total_fee_sat, info.fee / 1000);
4100                                                 }
4101                                 }
4102                         }
4103                 }
4104
4105                 if msg.htlc_signatures.len() != commitment_stats.num_nondust_htlcs {
4106                         return Err(ChannelError::Close(format!("Got wrong number of HTLC signatures ({}) from remote. It must be {}", msg.htlc_signatures.len(), commitment_stats.num_nondust_htlcs)));
4107                 }
4108
4109                 // Up to LDK 0.0.115, HTLC information was required to be duplicated in the
4110                 // `htlcs_and_sigs` vec and in the `holder_commitment_tx` itself, both of which were passed
4111                 // in the `ChannelMonitorUpdate`. In 0.0.115, support for having a separate set of
4112                 // outbound-non-dust-HTLCSources in the `ChannelMonitorUpdate` was added, however for
4113                 // backwards compatibility, we never use it in production. To provide test coverage, here,
4114                 // we randomly decide (in test/fuzzing builds) to use the new vec sometimes.
4115                 #[allow(unused_assignments, unused_mut)]
4116                 let mut separate_nondust_htlc_sources = false;
4117                 #[cfg(all(feature = "std", any(test, fuzzing)))] {
4118                         use core::hash::{BuildHasher, Hasher};
4119                         // Get a random value using the only std API to do so - the DefaultHasher
4120                         let rand_val = std::collections::hash_map::RandomState::new().build_hasher().finish();
4121                         separate_nondust_htlc_sources = rand_val % 2 == 0;
4122                 }
4123
4124                 let mut nondust_htlc_sources = Vec::with_capacity(htlcs_cloned.len());
4125                 let mut htlcs_and_sigs = Vec::with_capacity(htlcs_cloned.len());
4126                 for (idx, (htlc, mut source_opt)) in htlcs_cloned.drain(..).enumerate() {
4127                         if let Some(_) = htlc.transaction_output_index {
4128                                 let htlc_tx = chan_utils::build_htlc_transaction(&commitment_txid, commitment_stats.feerate_per_kw,
4129                                         self.context.get_counterparty_selected_contest_delay().unwrap(), &htlc, &self.context.channel_type,
4130                                         &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
4131
4132                                 let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &keys);
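                                     // Anchor-outputs channels sign HTLC transactions with
                                     // SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so they can later be fee-bumped by
                                     // attaching inputs/outputs; non-anchor channels use plain SIGHASH_ALL.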
4133                                 let htlc_sighashtype = if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
4134                                 let htlc_sighash = hash_to_message!(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]);
4135                                 log_trace!(logger, "Checking HTLC tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} in channel {}.",
4136                                         log_bytes!(msg.htlc_signatures[idx].serialize_compact()[..]), log_bytes!(keys.countersignatory_htlc_key.to_public_key().serialize()),
4137                                         encode::serialize_hex(&htlc_tx), log_bytes!(htlc_sighash[..]), encode::serialize_hex(&htlc_redeemscript), &self.context.channel_id());
4138                                 if let Err(_) = self.context.secp_ctx.verify_ecdsa(&htlc_sighash, &msg.htlc_signatures[idx], &keys.countersignatory_htlc_key.to_public_key()) {
4139                                         return Err(ChannelError::Close("Invalid HTLC tx signature from peer".to_owned()));
4140                                 }
4141                                 if !separate_nondust_htlc_sources {
4142                                         htlcs_and_sigs.push((htlc, Some(msg.htlc_signatures[idx]), source_opt.take()));
4143                                 }
4144                         } else {
4145                                 htlcs_and_sigs.push((htlc, None, source_opt.take()));
4146                         }
4147                         if separate_nondust_htlc_sources {
4148                                 if let Some(source) = source_opt.take() {
4149                                         nondust_htlc_sources.push(source);
4150                                 }
4151                         }
4152                         debug_assert!(source_opt.is_none(), "HTLCSource should have been put somewhere");
4153                 }
4154
4155                 let holder_commitment_tx = HolderCommitmentTransaction::new(
4156                         commitment_stats.tx,
4157                         msg.signature,
4158                         msg.htlc_signatures.clone(),
4159                         &self.context.get_holder_pubkeys().funding_pubkey,
4160                         self.context.counterparty_funding_pubkey()
4161                 );
4162
4163                 self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, commitment_stats.outbound_htlc_preimages)
4164                         .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?;
4165
4166                 // Update state now that we've passed all the can-fail calls...
4167                 let mut need_commitment = false;
4168                 if let &mut Some((_, ref mut update_state)) = &mut self.context.pending_update_fee {
4169                         if *update_state == FeeUpdateState::RemoteAnnounced {
4170                                 *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce;
4171                                 need_commitment = true;
4172                         }
4173                 }
4174
4175                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
4176                         let new_forward = if let &InboundHTLCState::RemoteAnnounced(ref forward_info) = &htlc.state {
4177                                 Some(forward_info.clone())
4178                         } else { None };
4179                         if let Some(forward_info) = new_forward {
4180                                 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToAnnounce due to commitment_signed in channel {}.",
4181                                         &htlc.payment_hash, &self.context.channel_id);
4182                                 htlc.state = InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info);
4183                                 need_commitment = true;
4184                         }
4185                 }
4186                 let mut claimed_htlcs = Vec::new();
4187                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4188                         if let &mut OutboundHTLCState::RemoteRemoved(ref mut outcome) = &mut htlc.state {
4189                                 log_trace!(logger, "Updating HTLC {} to AwaitingRemoteRevokeToRemove due to commitment_signed in channel {}.",
4190                                         &htlc.payment_hash, &self.context.channel_id);
4191                                 // Grab the preimage, if it exists, instead of cloning
4192                                 let mut reason = OutboundHTLCOutcome::Success(None);
4193                                 mem::swap(outcome, &mut reason);
4194                                 if let OutboundHTLCOutcome::Success(Some(preimage)) = reason {
4195                                         // If a user (a) receives an HTLC claim using LDK 0.0.104 or before, then (b)
4196                                         // upgrades to LDK 0.0.114 or later before the HTLC is fully resolved, we could
4197                                         // have a `Success(None)` reason. In this case we could forget some HTLC
4198                                         // claims, but such an upgrade is unlikely and including claimed HTLCs here
4199                                         // fixes a bug which the user was exposed to on 0.0.104 when they started the
4200                                         // claim anyway.
4201                                         claimed_htlcs.push((SentHTLCId::from_source(&htlc.source), preimage));
4202                                 }
4203                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(reason);
4204                                 need_commitment = true;
4205                         }
4206                 }
4207
4208                 self.context.latest_monitor_update_id += 1;
4209                 let mut monitor_update = ChannelMonitorUpdate {
4210                         update_id: self.context.latest_monitor_update_id,
4211                         counterparty_node_id: Some(self.context.counterparty_node_id),
4212                         updates: vec![ChannelMonitorUpdateStep::LatestHolderCommitmentTXInfo {
4213                                 commitment_tx: holder_commitment_tx,
4214                                 htlc_outputs: htlcs_and_sigs,
4215                                 claimed_htlcs,
4216                                 nondust_htlc_sources,
4217                         }],
4218                         channel_id: Some(self.context.channel_id()),
4219                 };
4220
4221                 self.context.cur_holder_commitment_transaction_number -= 1;
4222                 self.context.expecting_peer_commitment_signed = false;
4223                 // Note that if we need_commitment & !AwaitingRemoteRevoke we'll call
4224                 // build_commitment_no_status_check() next which will reset this to RAAFirst.
4225                 self.context.resend_order = RAACommitmentOrder::CommitmentFirst;
4226
4227                 if self.context.channel_state.is_monitor_update_in_progress() {
4228                         // In case we initially failed monitor updating without requiring a response, we need
4229                         // to make sure the RAA gets sent first.
4230                         self.context.monitor_pending_revoke_and_ack = true;
4231                         if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4232                                 // If we were going to send a commitment_signed after the RAA, go ahead and do all
4233                                 // the corresponding HTLC status updates so that
4234                                 // get_last_commitment_update_for_send includes the right HTLCs.
4235                                 self.context.monitor_pending_commitment_signed = true;
4236                                 let mut additional_update = self.build_commitment_no_status_check(logger);
4237                                 // build_commitment_no_status_check may bump latest_monitor_update_id but we want
4238                                 // update_ids to be strictly increasing by one, so decrement it here.
4239                                 self.context.latest_monitor_update_id = monitor_update.update_id;
4240                                 monitor_update.updates.append(&mut additional_update.updates);
4241                         }
4242                         log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updated HTLC state but awaiting a monitor update resolution to reply.",
4243                                 &self.context.channel_id);
4244                         return Ok(self.push_ret_blockable_mon_update(monitor_update));
4245                 }
4246
4247                 let need_commitment_signed = if need_commitment && !self.context.channel_state.is_awaiting_remote_revoke() {
4248                         // If we're AwaitingRemoteRevoke we can't send a new commitment here, but that's ok -
4249                         // we'll send one right away when we get the revoke_and_ack when we
4250                         // free_holding_cell_htlcs().
4251                         let mut additional_update = self.build_commitment_no_status_check(logger);
4252                         // build_commitment_no_status_check may bump latest_monitor_update_id but we want
4253                         // update_ids to be strictly increasing by one, so decrement it here.
4254                         self.context.latest_monitor_update_id = monitor_update.update_id;
4255                         monitor_update.updates.append(&mut additional_update.updates);
4256                         true
4257                 } else { false };
4258
4259                 log_debug!(logger, "Received valid commitment_signed from peer in channel {}, updating HTLC state and responding with{} a revoke_and_ack.",
4260                         &self.context.channel_id(), if need_commitment_signed { " our own commitment_signed and" } else { "" });
4261                 self.monitor_updating_paused(true, need_commitment_signed, false, Vec::new(), Vec::new(), Vec::new());
4262                 return Ok(self.push_ret_blockable_mon_update(monitor_update));
4263         }
4264
4265         /// Public version of the below, checking relevant preconditions first.
4266         /// If we're not in a state where freeing the holding cell makes sense, this is a no-op and
4267         /// returns `(None, Vec::new())`.
4268         pub fn maybe_free_holding_cell_htlcs<F: Deref, L: Deref>(
4269                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4270         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
4271         where F::Target: FeeEstimator, L::Target: Logger
4272         {
4273                 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.channel_state.can_generate_new_commitment() {
4274                         self.free_holding_cell_htlcs(fee_estimator, logger)
4275                 } else { (None, Vec::new()) }
4276         }
4277
4278         /// Frees any pending commitment updates in the holding cell, generating the relevant messages
4279         /// for our counterparty.
4280         fn free_holding_cell_htlcs<F: Deref, L: Deref>(
4281                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4282         ) -> (Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>)
4283         where F::Target: FeeEstimator, L::Target: Logger
4284         {
4285                 assert!(!self.context.channel_state.is_monitor_update_in_progress());
4286                 if self.context.holding_cell_htlc_updates.len() != 0 || self.context.holding_cell_update_fee.is_some() {
4287                         log_trace!(logger, "Freeing holding cell with {} HTLC updates{} in channel {}", self.context.holding_cell_htlc_updates.len(),
4288                                 if self.context.holding_cell_update_fee.is_some() { " and a fee update" } else { "" }, &self.context.channel_id());
4289
4290                         let mut monitor_update = ChannelMonitorUpdate {
4291                                 update_id: self.context.latest_monitor_update_id + 1, // We don't increment this yet!
4292                                 counterparty_node_id: Some(self.context.counterparty_node_id),
4293                                 updates: Vec::new(),
4294                                 channel_id: Some(self.context.channel_id()),
4295                         };
4296
4297                         let mut htlc_updates = Vec::new();
4298                         mem::swap(&mut htlc_updates, &mut self.context.holding_cell_htlc_updates);
4299                         let mut update_add_count = 0;
4300                         let mut update_fulfill_count = 0;
4301                         let mut update_fail_count = 0;
4302                         let mut htlcs_to_fail = Vec::new();
4303                         for htlc_update in htlc_updates.drain(..) {
4304                                 // Note that this *can* fail, though it should only be due to rather-rare conditions,
4305                                 // such as fee races with adding too many outputs pushing our total payments just over
4306                                 // the limit. In case it's less rare than I anticipate, we may want to revisit
4307                                 // handling this case better, perhaps fulfilling some of the HTLCs while attempting
4308                                 // to rebalance channels.
4309                                 let fail_htlc_res = match &htlc_update {
4310                                         &HTLCUpdateAwaitingACK::AddHTLC {
4311                                                 amount_msat, cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
4312                                                 skimmed_fee_msat, blinding_point, ..
4313                                         } => {
4314                                                 match self.send_htlc(
4315                                                         amount_msat, *payment_hash, cltv_expiry, source.clone(), onion_routing_packet.clone(),
4316                                                         false, skimmed_fee_msat, blinding_point, fee_estimator, logger
4317                                                 ) {
4318                                                         Ok(_) => update_add_count += 1,
4319                                                         Err(e) => {
4320                                                                 match e {
4321                                                                         ChannelError::Ignore(ref msg) => {
4322                                                                                 log_info!(logger, "Failed to send HTLC with payment_hash {} due to {} in channel {}", &payment_hash, msg, &self.context.channel_id());
4323                                                                                 // If we fail to send here, then this HTLC should
4324                                                                                 // be failed backwards. Failing to send here
4325                                                                                 // indicates that this HTLC may keep being put back
4326                                                                                 // into the holding cell without ever being
4327                                                                                 // successfully forwarded/failed/fulfilled, causing
4328                                                                                 // our counterparty to eventually close on us.
4329                                                                                 htlcs_to_fail.push((source.clone(), *payment_hash));
4330                                                                         },
4331                                                                         _ => {
4332                                                                                 panic!("Got a non-IgnoreError action trying to send holding cell HTLC");
4333                                                                         },
4334                                                                 }
4335                                                         }
4336                                                 }
4337                                                 None
4338                                         },
4339                                         &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, htlc_id, .. } => {
4340                                                 // If an HTLC claim was previously added to the holding cell (via
4341                                                 // `get_update_fulfill_htlc`), then generating the claim message itself must
4342                                                 // not fail - any in-between attempts to claim the HTLC will have resulted
4343                                                 // in it hitting the holding cell again and we cannot change the state of a
4344                                                 // holding cell HTLC from fulfill to anything else.
4345                                                 let mut additional_monitor_update =
4346                                                         if let UpdateFulfillFetch::NewClaim { monitor_update, .. } =
4347                                                                 self.get_update_fulfill_htlc(htlc_id, *payment_preimage, logger)
4348                                                         { monitor_update } else { unreachable!() };
4349                                                 update_fulfill_count += 1;
4350                                                 monitor_update.updates.append(&mut additional_monitor_update.updates);
4351                                                 None
4352                                         },
4353                                         &HTLCUpdateAwaitingACK::FailHTLC { htlc_id, ref err_packet } => {
4354                                                 Some(self.fail_htlc(htlc_id, err_packet.clone(), false, logger)
4355                                                  .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
4356                                         },
4357                                         &HTLCUpdateAwaitingACK::FailMalformedHTLC { htlc_id, failure_code, sha256_of_onion } => {
4358                                                 Some(self.fail_htlc(htlc_id, (sha256_of_onion, failure_code), false, logger)
4359                                                  .map(|fail_msg_opt| fail_msg_opt.map(|_| ())))
4360                                         }
4361                                 };
4362                                 if let Some(res) = fail_htlc_res {
4363                                         match res {
4364                                                 Ok(fail_msg_opt) => {
4365                                                         // If an HTLC failure was previously added to the holding cell (via
4366                                                         // `queue_fail_{malformed_}htlc`) then generating the fail message itself must
4367                                                         // not fail - we should never end up in a state where we double-fail
4368                                                         // an HTLC or fail-then-claim an HTLC as it indicates we didn't wait
4369                                                         // for a full revocation before failing.
4370                                                         debug_assert!(fail_msg_opt.is_some());
4371                                                         update_fail_count += 1;
4372                                                 },
4373                                                 Err(ChannelError::Ignore(_)) => {},
4374                                                 Err(_) => {
4375                                                         panic!("Got a non-IgnoreError action trying to fail holding cell HTLC");
4376                                                 },
4377                                         }
4378                                 }
4379                         }
4380                         if update_add_count == 0 && update_fulfill_count == 0 && update_fail_count == 0 && self.context.holding_cell_update_fee.is_none() {
4381                                 return (None, htlcs_to_fail);
4382                         }
4383                         let update_fee = if let Some(feerate) = self.context.holding_cell_update_fee.take() {
4384                                 self.send_update_fee(feerate, false, fee_estimator, logger)
4385                         } else {
4386                                 None
4387                         };
4388
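                             // All of the freed updates are covered by a single new commitment, built here
                             // and merged into the one monitor update we hand back.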
4389                         let mut additional_update = self.build_commitment_no_status_check(logger);
4390                         // build_commitment_no_status_check and get_update_fulfill_htlc may each bump
4391                         // latest_monitor_update_id, but we want update IDs to be strictly increasing by one, so reset it here.
4392                         self.context.latest_monitor_update_id = monitor_update.update_id;
4393                         monitor_update.updates.append(&mut additional_update.updates);
4394
4395                         log_debug!(logger, "Freeing holding cell in channel {} resulted in {}{} HTLCs added, {} HTLCs fulfilled, and {} HTLCs failed.",
4396                                 &self.context.channel_id(), if update_fee.is_some() { "a fee update, " } else { "" },
4397                                 update_add_count, update_fulfill_count, update_fail_count);
4398
4399                         self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
4400                         (self.push_ret_blockable_mon_update(monitor_update), htlcs_to_fail)
4401                 } else {
4402                         (None, Vec::new())
4403                 }
4404         }
4405
4406         /// Handles receiving a remote's revoke_and_ack. Note that we may return a new
4407         /// commitment_signed message here in case we had pending outbound HTLCs to add which were
4408         /// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
4409         /// generating an appropriate error *after* the channel state has been updated based on the
4410         /// revoke_and_ack message.
4411         pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK,
4412                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool,
4413         ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError>
4414         where F::Target: FeeEstimator, L::Target: Logger,
4415         {
4416                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) {
4417                         return Err(ChannelError::Close("Got revoke/ACK message when channel was not in an operational state".to_owned()));
4418                 }
4419                 if self.context.channel_state.is_peer_disconnected() {
4420                         return Err(ChannelError::Close("Peer sent revoke_and_ack when we needed a channel_reestablish".to_owned()));
4421                 }
4422                 if self.context.channel_state.is_both_sides_shutdown() && self.context.last_sent_closing_fee.is_some() {
4423                         return Err(ChannelError::Close("Peer sent revoke_and_ack after we'd started exchanging closing_signeds".to_owned()));
4424                 }
4425
4426                 let secret = secp_check!(SecretKey::from_slice(&msg.per_commitment_secret), "Peer provided an invalid per_commitment_secret".to_owned());
4427
4428                 if let Some(counterparty_prev_commitment_point) = self.context.counterparty_prev_commitment_point {
4429                         if PublicKey::from_secret_key(&self.context.secp_ctx, &secret) != counterparty_prev_commitment_point {
4430                                 return Err(ChannelError::Close("Got a revoke commitment secret which didn't correspond to their current pubkey".to_owned()));
4431                         }
4432                 }
4433
4434                 if !self.context.channel_state.is_awaiting_remote_revoke() {
4435                         // Our counterparty seems to have burned their coins to us (by revoking a state when we
4436                         // haven't given them a new commitment transaction to broadcast). We should probably
4437                         // take advantage of this by updating our channel monitor, sending them an error, and
4438                         // waiting for them to broadcast their latest (now-revoked) claim. But, that would be a
4439                         // lot of work, and there's some chance this is all a misunderstanding anyway.
4440                         // We have to do *something*, though, since our signer may get mad at us for otherwise
4441                         // jumping a remote commitment number, so best to just force-close and move on.
4442                         return Err(ChannelError::Close("Received an unexpected revoke_and_ack".to_owned()));
4443                 }
4444
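                     // State is advancing, so any cached fee estimates for the next commitment are
                     // stale; clear them (these caches only exist for test/fuzzing).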
4445                 #[cfg(any(test, fuzzing))]
4446                 {
4447                         *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
4448                         *self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap() = None;
4449                 }
4450
4451                 match &self.context.holder_signer {
4452                         ChannelSignerType::Ecdsa(ecdsa) => {
4453                                 ecdsa.validate_counterparty_revocation(
4454                                         self.context.cur_counterparty_commitment_transaction_number + 1,
4455                                         &secret
4456                                 ).map_err(|_| ChannelError::Close("Failed to validate revocation from peer".to_owned()))?;
4457                         },
4458                         // TODO (taproot|arik)
4459                         #[cfg(taproot)]
4460                         _ => todo!()
4461                 };
4462
4463                 self.context.commitment_secrets.provide_secret(self.context.cur_counterparty_commitment_transaction_number + 1, msg.per_commitment_secret)
4464                         .map_err(|_| ChannelError::Close("Previous secrets did not match new one".to_owned()))?;
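                     // Hand the newly-revealed secret to the channel monitor so it can punish the
                     // counterparty if they later broadcast the now-revoked commitment transaction.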
4465                 self.context.latest_monitor_update_id += 1;
4466                 let mut monitor_update = ChannelMonitorUpdate {
4467                         update_id: self.context.latest_monitor_update_id,
4468                         counterparty_node_id: Some(self.context.counterparty_node_id),
4469                         updates: vec![ChannelMonitorUpdateStep::CommitmentSecret {
4470                                 idx: self.context.cur_counterparty_commitment_transaction_number + 1,
4471                                 secret: msg.per_commitment_secret,
4472                         }],
4473                         channel_id: Some(self.context.channel_id()),
4474                 };
4475
4476                 // Update state now that we've passed all the can-fail calls...
4477                 // (note that we may still fail to generate the new commitment_signed message, but that's
4478                 // OK, we step the channel here and *then* if the new generation fails we can fail the
4479                 // channel based on that, but stepping stuff here should be safe either way.)
4480                 self.context.channel_state.clear_awaiting_remote_revoke();
4481                 self.context.sent_message_awaiting_response = None;
4482                 self.context.counterparty_prev_commitment_point = self.context.counterparty_cur_commitment_point;
4483                 self.context.counterparty_cur_commitment_point = Some(msg.next_per_commitment_point);
4484                 self.context.cur_counterparty_commitment_transaction_number -= 1;
4485
4486                 if self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4487                         self.context.announcement_sigs_state = AnnouncementSigsState::PeerReceived;
4488                 }
4489
4490                 log_trace!(logger, "Updating HTLCs on receipt of RAA in channel {}...", &self.context.channel_id());
4491                 let mut to_forward_infos = Vec::new();
4492                 let mut revoked_htlcs = Vec::new();
4493                 let mut finalized_claimed_htlcs = Vec::new();
4494                 let mut update_fail_htlcs = Vec::new();
4495                 let mut update_fail_malformed_htlcs = Vec::new();
4496                 let mut require_commitment = false;
4497                 let mut value_to_self_msat_diff: i64 = 0;
4498
4499                 {
4500                         // Take references explicitly so that we can hold multiple references to self.context.
4501                         let pending_inbound_htlcs: &mut Vec<_> = &mut self.context.pending_inbound_htlcs;
4502                         let pending_outbound_htlcs: &mut Vec<_> = &mut self.context.pending_outbound_htlcs;
4503                         let expecting_peer_commitment_signed = &mut self.context.expecting_peer_commitment_signed;
4504
4505                         // We really shouldn't need two passes here, but retain only gives a shared ref (a Vec::retain limitation)
4506                         pending_inbound_htlcs.retain(|htlc| {
4507                                 if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
4508                                         log_trace!(logger, " ...removing inbound LocalRemoved {}", &htlc.payment_hash);
4509                                         if let &InboundHTLCRemovalReason::Fulfill(_) = reason {
4510                                                 value_to_self_msat_diff += htlc.amount_msat as i64;
4511                                         }
4512                                         *expecting_peer_commitment_signed = true;
4513                                         false
4514                                 } else { true }
4515                         });
4516                         pending_outbound_htlcs.retain(|htlc| {
4517                                 if let &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) = &htlc.state {
4518                                         log_trace!(logger, " ...removing outbound AwaitingRemovedRemoteRevoke {}", &htlc.payment_hash);
4519                                         if let OutboundHTLCOutcome::Failure(reason) = outcome.clone() { // We really want take() here, but, again, non-mut ref :(
4520                                                 revoked_htlcs.push((htlc.source.clone(), htlc.payment_hash, reason));
4521                                         } else {
4522                                                 finalized_claimed_htlcs.push(htlc.source.clone());
4523                                                 // They fulfilled, so we sent them money
4524                                                 value_to_self_msat_diff -= htlc.amount_msat as i64;
4525                                         }
4526                                         false
4527                                 } else { true }
4528                         });
4529                         for htlc in pending_inbound_htlcs.iter_mut() {
4530                                 let swap = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_) = &htlc.state {
4531                                         true
4532                                 } else if let &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) = &htlc.state {
4533                                         true
4534                                 } else { false };
4535                                 if swap {
4536                                         let mut state = InboundHTLCState::Committed;
4537                                         mem::swap(&mut state, &mut htlc.state);
4538
4539                                         if let InboundHTLCState::AwaitingRemoteRevokeToAnnounce(forward_info) = state {
4540                                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
4541                                                 htlc.state = InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info);
4542                                                 require_commitment = true;
4543                                         } else if let InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info) = state {
4544                                                 match forward_info {
4545                                                         PendingHTLCStatus::Fail(fail_msg) => {
4546                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to LocalRemoved due to PendingHTLCStatus indicating failure", &htlc.payment_hash);
4547                                                                 require_commitment = true;
4548                                                                 match fail_msg {
4549                                                                         HTLCFailureMsg::Relay(msg) => {
4550                                                                                 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailRelay(msg.reason.clone()));
4551                                                                                 update_fail_htlcs.push(msg)
4552                                                                         },
4553                                                                         HTLCFailureMsg::Malformed(msg) => {
4554                                                                                 htlc.state = InboundHTLCState::LocalRemoved(InboundHTLCRemovalReason::FailMalformed((msg.sha256_of_onion, msg.failure_code)));
4555                                                                                 update_fail_malformed_htlcs.push(msg)
4556                                                                         },
4557                                                                 }
4558                                                         },
4559                                                         PendingHTLCStatus::Forward(forward_info) => {
4560                                                                 log_trace!(logger, " ...promoting inbound AwaitingAnnouncedRemoteRevoke {} to Committed", &htlc.payment_hash);
4561                                                                 to_forward_infos.push((forward_info, htlc.htlc_id));
4562                                                                 htlc.state = InboundHTLCState::Committed;
4563                                                         }
4564                                                 }
4565                                         }
4566                                 }
4567                         }
4568                         for htlc in pending_outbound_htlcs.iter_mut() {
4569                                 if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
4570                                         log_trace!(logger, " ...promoting outbound LocalAnnounced {} to Committed", &htlc.payment_hash);
4571                                         htlc.state = OutboundHTLCState::Committed;
4572                                         *expecting_peer_commitment_signed = true;
4573                                 }
4574                                 if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
4575                                         log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
4576                                         // Grab the preimage, if it exists, instead of cloning
4577                                         let mut reason = OutboundHTLCOutcome::Success(None);
4578                                         mem::swap(outcome, &mut reason);
4579                                         htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
4580                                         require_commitment = true;
4581                                 }
4582                         }
4583                 }
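                 // Apply the net balance change from HTLCs fulfilled in either direction above.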
4584                 self.context.value_to_self_msat = (self.context.value_to_self_msat as i64 + value_to_self_msat_diff) as u64;
4585
4586                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
4587                         match update_state {
4588                                 FeeUpdateState::Outbound => {
4589                                         debug_assert!(self.context.is_outbound());
4590                                         log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate);
4591                                         self.context.feerate_per_kw = feerate;
4592                                         self.context.pending_update_fee = None;
4593                                         self.context.expecting_peer_commitment_signed = true;
4594                                 },
4595                                 FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.context.is_outbound()); },
4596                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce => {
4597                                         debug_assert!(!self.context.is_outbound());
4598                                         log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
4599                                         require_commitment = true;
4600                                         self.context.feerate_per_kw = feerate;
4601                                         self.context.pending_update_fee = None;
4602                                 },
4603                         }
4604                 }
4605
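                     // Decide whether this monitor update can be released now: if earlier updates are
                     // still blocked, or the caller asked us to hold it, we queue it instead.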
4606                 let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update;
4607                 let release_state_str =
4608                         if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" };
4609                 macro_rules! return_with_htlcs_to_fail {
4610                         ($htlcs_to_fail: expr) => {
4611                                 if !release_monitor {
4612                                         self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
4613                                                 update: monitor_update,
4614                                         });
4615                                         return Ok(($htlcs_to_fail, None));
4616                                 } else {
4617                                         return Ok(($htlcs_to_fail, Some(monitor_update)));
4618                                 }
4619                         }
4620                 }
4621
4622                 if self.context.channel_state.is_monitor_update_in_progress() {
4623                         // We can't actually generate a new commitment transaction (incl by freeing holding
4624                         // cells) while we can't update the monitor, so we just return what we have.
4625                         if require_commitment {
4626                                 self.context.monitor_pending_commitment_signed = true;
4627                                 // When the monitor updating is restored we'll call
4628                                 // get_last_commitment_update_for_send(), which does not update state, but we're
4629                                 // definitely now awaiting a remote revoke before we can step forward any more, so
4630                                 // set it here.
4631                                 let mut additional_update = self.build_commitment_no_status_check(logger);
4632                                 // build_commitment_no_status_check may bump latest_monitor_update_id, but we
4633                                 // want update IDs to be strictly increasing by one, so reset it here.
4634                                 self.context.latest_monitor_update_id = monitor_update.update_id;
4635                                 monitor_update.updates.append(&mut additional_update.updates);
4636                         }
4637                         self.context.monitor_pending_forwards.append(&mut to_forward_infos);
4638                         self.context.monitor_pending_failures.append(&mut revoked_htlcs);
4639                         self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
4640                         log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", &self.context.channel_id());
4641                         return_with_htlcs_to_fail!(Vec::new());
4642                 }
4643
4644                 match self.free_holding_cell_htlcs(fee_estimator, logger) {
4645                         (Some(mut additional_update), htlcs_to_fail) => {
4646                                 // free_holding_cell_htlcs may bump latest_monitor_update_id multiple times, but
4647                                 // we want update IDs to be strictly increasing by one, so reset it here.
4648                                 self.context.latest_monitor_update_id = monitor_update.update_id;
4649                                 monitor_update.updates.append(&mut additional_update.updates);
4650
4651                                 log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.",
4652                                         &self.context.channel_id(), release_state_str);
4653
4654                                 self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4655                                 return_with_htlcs_to_fail!(htlcs_to_fail);
4656                         },
4657                         (None, htlcs_to_fail) => {
4658                                 if require_commitment {
4659                                         let mut additional_update = self.build_commitment_no_status_check(logger);
4660
4661                                         // build_commitment_no_status_check may bump latest_monitor_update_id, but
4662                                         // we want update IDs to be strictly increasing by one, so reset it here.
4663                                         self.context.latest_monitor_update_id = monitor_update.update_id;
4664                                         monitor_update.updates.append(&mut additional_update.updates);
4665
4666                                         log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.",
4667                                                 &self.context.channel_id(),
4668                                                 update_fail_htlcs.len() + update_fail_malformed_htlcs.len(),
4669                                                 release_state_str);
4670
4671                                         self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4672                                         return_with_htlcs_to_fail!(htlcs_to_fail);
4673                                 } else {
4674                                         log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.",
4675                                                 &self.context.channel_id(), release_state_str);
4676
4677                                         self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
4678                                         return_with_htlcs_to_fail!(htlcs_to_fail);
4679                                 }
4680                         }
4681                 }
4682         }
4683
4684         /// Queues up an outbound update fee by placing it in the holding cell. You should call
4685         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
4686         /// commitment update.
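             ///
             /// Rough flow sketch (identifiers are illustrative):
             /// ```ignore
             /// channel.queue_update_fee(new_feerate_per_kw, &fee_estimator, &logger);
             /// // Later, once a new commitment can be generated, free the holding cell to
             /// // actually produce the update_fee and commitment_signed messages:
             /// let (monitor_update_opt, failed_htlcs) =
             ///     channel.maybe_free_holding_cell_htlcs(&fee_estimator, &logger);
             /// ```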
4687         pub fn queue_update_fee<F: Deref, L: Deref>(&mut self, feerate_per_kw: u32,
4688                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
4689         where F::Target: FeeEstimator, L::Target: Logger
4690         {
4691                 let msg_opt = self.send_update_fee(feerate_per_kw, true, fee_estimator, logger);
4692                 assert!(msg_opt.is_none(), "We forced holding cell?");
4693         }
4694
4695         /// Adds a pending fee update to this channel. See the doc for `send_htlc` for
4696         /// further details on why the return value is an `Option`.
4697         /// If our balance is too low to cover the cost of the next commitment transaction at the
4698         /// new feerate, the update is cancelled.
4699         ///
4700         /// You MUST call [`Self::send_commitment_no_state_update`] prior to any other calls on this
4701         /// [`Channel`] if `force_holding_cell` is false.
4702         fn send_update_fee<F: Deref, L: Deref>(
4703                 &mut self, feerate_per_kw: u32, mut force_holding_cell: bool,
4704                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
4705         ) -> Option<msgs::UpdateFee>
4706         where F::Target: FeeEstimator, L::Target: Logger
4707         {
4708                 if !self.context.is_outbound() {
4709                         panic!("Cannot send fee from inbound channel");
4710                 }
4711                 if !self.context.is_usable() {
4712                         panic!("Cannot update fee unless channel is fully established and we haven't started shutting down");
4713                 }
4714                 if !self.context.is_live() {
4715                         panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)");
4716                 }
4717
4718                 // Before proposing a feerate update, check that we can actually afford the new fee.
4719                 let inbound_stats = self.context.get_inbound_pending_htlc_stats(Some(feerate_per_kw));
4720                 let outbound_stats = self.context.get_outbound_pending_htlc_stats(Some(feerate_per_kw));
4721                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
4722                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, true, logger);
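                 // Budget the new commitment's fee, including headroom for HTLCs our counterparty
                 // may be adding concurrently (CONCURRENT_INBOUND_HTLC_FEE_BUFFER).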
4723                 let buffer_fee_msat = commit_tx_fee_sat(feerate_per_kw, commitment_stats.num_nondust_htlcs + outbound_stats.on_holder_tx_holding_cell_htlcs_count as usize + CONCURRENT_INBOUND_HTLC_FEE_BUFFER as usize, self.context.get_channel_type()) * 1000;
4724                 let holder_balance_msat = commitment_stats.local_balance_msat - outbound_stats.holding_cell_msat;
4725                 if holder_balance_msat < buffer_fee_msat + self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000 {
4726                         //TODO: auto-close after a number of failures?
4727                         log_debug!(logger, "Cannot afford to send new feerate at {}", feerate_per_kw);
4728                         return None;
4729                 }
4730
4731                 // Note that we evaluate each pending HTLC's "preemptive" trimmed-to-dust threshold at the proposed `feerate_per_kw`.
4732                 let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4733                 let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4734                 let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4735                 if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4736                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4737                         return None;
4738                 }
4739                 if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4740                         log_debug!(logger, "Cannot afford to send new feerate at {} without infringing max dust htlc exposure", feerate_per_kw);
4741                         return None;
4742                 }
4743
4744                 if self.context.channel_state.is_awaiting_remote_revoke() || self.context.channel_state.is_monitor_update_in_progress() {
4745                         force_holding_cell = true;
4746                 }
4747
4748                 if force_holding_cell {
4749                         self.context.holding_cell_update_fee = Some(feerate_per_kw);
4750                         return None;
4751                 }
4752
4753                 debug_assert!(self.context.pending_update_fee.is_none());
4754                 self.context.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound));
4755
4756                 Some(msgs::UpdateFee {
4757                         channel_id: self.context.channel_id,
4758                         feerate_per_kw,
4759                 })
4760         }
4761
4762         /// Removes any uncommitted inbound HTLCs and resets the state of uncommitted outbound HTLC
4763         /// updates, to be used on peer disconnection. After this, update_*_htlc messages need to be
4764         /// resent.
4765         /// No further message handling calls may be made until a channel_reestablish dance has
4766         /// completed.
4767         /// May return `Err(())`, which implies [`ChannelContext::force_shutdown`] should be called immediately.
4768         pub fn remove_uncommitted_htlcs_and_mark_paused<L: Deref>(&mut self, logger: &L) -> Result<(), ()> where L::Target: Logger {
4769                 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
4770                 if self.context.channel_state.is_pre_funded_state() {
4771                         return Err(())
4772                 }
4773
4774                 if self.context.channel_state.is_peer_disconnected() {
4775                         // While the code below should be idempotent, it's simpler to just return early, as
4776                         // redundant disconnect events can fire (though they should be rare).
4777                         return Ok(());
4778                 }
4779
4780                 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent || self.context.announcement_sigs_state == AnnouncementSigsState::Committed {
4781                         self.context.announcement_sigs_state = AnnouncementSigsState::NotSent;
4782                 }
4783
4784                 // Upon reconnect we have to start the closing_signed dance over, but shutdown messages
4785                 // will be retransmitted.
4786                 self.context.last_sent_closing_fee = None;
4787                 self.context.pending_counterparty_closing_signed = None;
4788                 self.context.closing_fee_limits = None;
4789
4790                 let mut inbound_drop_count = 0;
4791                 self.context.pending_inbound_htlcs.retain(|htlc| {
4792                         match htlc.state {
4793                                 InboundHTLCState::RemoteAnnounced(_) => {
4794                                         // They sent us an update_add_htlc but we never got the commitment_signed.
4795                                         // We'll tell them what commitment_signed we're expecting next and they'll drop
4796                                         // this HTLC accordingly
4797                                         inbound_drop_count += 1;
4798                                         false
4799                                 },
4800                                 InboundHTLCState::AwaitingRemoteRevokeToAnnounce(_)|InboundHTLCState::AwaitingAnnouncedRemoteRevoke(_) => {
4801                                         // We received a commitment_signed updating this HTLC and (at least hopefully)
4802                                         // sent a revoke_and_ack (which we can re-transmit) and have heard nothing
4803                                         // in response to it yet, so don't touch it.
4804                                         true
4805                                 },
4806                                 InboundHTLCState::Committed => true,
4807                                 InboundHTLCState::LocalRemoved(_) => {
4808                                         // We (hopefully) sent a commitment_signed updating this HTLC (which we can
4809                                         // re-transmit if needed) and they may have even sent a revoke_and_ack back
4810                                         // (that we missed). Keep this around for now and if they tell us they missed
4811                                         // the commitment_signed we can re-transmit the update then.
4812                                         true
4813                                 },
4814                         }
4815                 });
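                 // The dropped RemoteAnnounced HTLCs consumed counterparty HTLC IDs; roll the
                 // counter back since the peer will re-send them with the same IDs on reconnect.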
4816                 self.context.next_counterparty_htlc_id -= inbound_drop_count;
4817
4818                 if let Some((_, update_state)) = self.context.pending_update_fee {
4819                         if update_state == FeeUpdateState::RemoteAnnounced {
4820                                 debug_assert!(!self.context.is_outbound());
4821                                 self.context.pending_update_fee = None;
4822                         }
4823                 }
4824
4825                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
4826                         if let OutboundHTLCState::RemoteRemoved(_) = htlc.state {
4827                                 // They sent us an update to remove this but haven't yet sent the corresponding
4828                                 // commitment_signed, we need to move it back to Committed and they can re-send
4829                                 // the update upon reconnection.
4830                                 htlc.state = OutboundHTLCState::Committed;
4831                         }
4832                 }
4833
4834                 self.context.sent_message_awaiting_response = None;
4835
4836                 self.context.channel_state.set_peer_disconnected();
4837                 log_trace!(logger, "Peer disconnection resulted in {} remote-announced HTLC drops on channel {}", inbound_drop_count, &self.context.channel_id());
4838                 Ok(())
4839         }
4840
4841         /// Indicates that a ChannelMonitor update is in progress and has not yet been fully persisted.
4842         /// This must be called before we return the [`ChannelMonitorUpdate`] back to the
4843         /// [`ChannelManager`], which will call [`Self::monitor_updating_restored`] once the monitor
4844         /// update completes (potentially immediately).
4845         /// The messages which were generated with the monitor update must *not* have been sent to the
4846         /// remote end, and must instead have been dropped. They will be regenerated when
4847         /// [`Self::monitor_updating_restored`] is called.
4848         ///
4849         /// [`ChannelManager`]: super::channelmanager::ChannelManager
4850         /// [`chain::Watch`]: crate::chain::Watch
4851         /// [`ChannelMonitorUpdateStatus::InProgress`]: crate::chain::ChannelMonitorUpdateStatus::InProgress
4852         fn monitor_updating_paused(&mut self, resend_raa: bool, resend_commitment: bool,
4853                 resend_channel_ready: bool, mut pending_forwards: Vec<(PendingHTLCInfo, u64)>,
4854                 mut pending_fails: Vec<(HTLCSource, PaymentHash, HTLCFailReason)>,
4855                 mut pending_finalized_claimed_htlcs: Vec<HTLCSource>
4856         ) {
4857                 self.context.monitor_pending_revoke_and_ack |= resend_raa;
4858                 self.context.monitor_pending_commitment_signed |= resend_commitment;
4859                 self.context.monitor_pending_channel_ready |= resend_channel_ready;
4860                 self.context.monitor_pending_forwards.append(&mut pending_forwards);
4861                 self.context.monitor_pending_failures.append(&mut pending_fails);
4862                 self.context.monitor_pending_finalized_fulfills.append(&mut pending_finalized_claimed_htlcs);
4863                 self.context.channel_state.set_monitor_update_in_progress();
4864         }
4865
4866         /// Indicates that the latest ChannelMonitor update has been committed by the client
4867         /// successfully and we should restore normal operation. Returns messages which should be sent
4868         /// to the remote side.
4869         pub fn monitor_updating_restored<L: Deref, NS: Deref>(
4870                 &mut self, logger: &L, node_signer: &NS, chain_hash: ChainHash,
4871                 user_config: &UserConfig, best_block_height: u32
4872         ) -> MonitorRestoreUpdates
4873         where
4874                 L::Target: Logger,
4875                 NS::Target: NodeSigner
4876         {
4877                 assert!(self.context.channel_state.is_monitor_update_in_progress());
4878                 self.context.channel_state.clear_monitor_update_in_progress();
4879
4880                 // If we're past (or at) the AwaitingChannelReady stage on an outbound channel, try to
4881                 // (re-)broadcast the funding transaction as we may have declined to broadcast it when we
4882                 // first received the funding_signed.
4883                 let mut funding_broadcastable =
4884                         if self.context.is_outbound() &&
4885                                 (matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if !flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH)) ||
4886                                 matches!(self.context.channel_state, ChannelState::ChannelReady(_)))
4887                         {
4888                                 self.context.funding_transaction.take()
4889                         } else { None };
4890                 // That said, if the funding transaction is already confirmed (ie we're active with a
4891                 // minimum_depth over 0) don't bother re-broadcasting the confirmed funding tx.
4892                 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) && self.context.minimum_depth != Some(0) {
4893                         funding_broadcastable = None;
4894                 }
4895
4896                 // We will never broadcast the funding transaction when we're in MonitorUpdateInProgress
4897                 // (and we assume the user never directly broadcasts the funding transaction and waits for
4898                 // us to do it). Thus, we can only ever hit monitor_pending_channel_ready when we're
4899                 // * an inbound channel that failed to persist the monitor on funding_created and we got
4900                 //   the funding transaction confirmed before the monitor was persisted, or
4901                 // * a 0-conf channel where we intended to send the channel_ready before any broadcast at all.
4902                 let channel_ready = if self.context.monitor_pending_channel_ready {
4903                         assert!(!self.context.is_outbound() || self.context.minimum_depth == Some(0),
4904                                 "Funding transaction broadcast by the local client before it should have - LDK didn't do it!");
4905                         self.context.monitor_pending_channel_ready = false;
4906                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
4907                         Some(msgs::ChannelReady {
4908                                 channel_id: self.context.channel_id(),
4909                                 next_per_commitment_point,
4910                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
4911                         })
4912                 } else { None };
4913
4914                 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block_height, logger);
4915
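                 // Drain the HTLC resolutions which were deferred while the monitor update was in
                 // flight; the caller is now responsible for processing them.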
4916                 let mut accepted_htlcs = Vec::new();
4917                 mem::swap(&mut accepted_htlcs, &mut self.context.monitor_pending_forwards);
4918                 let mut failed_htlcs = Vec::new();
4919                 mem::swap(&mut failed_htlcs, &mut self.context.monitor_pending_failures);
4920                 let mut finalized_claimed_htlcs = Vec::new();
4921                 mem::swap(&mut finalized_claimed_htlcs, &mut self.context.monitor_pending_finalized_fulfills);
4922
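                 // With the peer disconnected we can't send an RAA or commitment_signed at all;
                 // channel_reestablish handling will work out what needs re-sending on reconnect.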
4923                 if self.context.channel_state.is_peer_disconnected() {
4924                         self.context.monitor_pending_revoke_and_ack = false;
4925                         self.context.monitor_pending_commitment_signed = false;
4926                         return MonitorRestoreUpdates {
4927                                 raa: None, commitment_update: None, order: RAACommitmentOrder::RevokeAndACKFirst,
4928                                 accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4929                         };
4930                 }
4931
4932                 let raa = if self.context.monitor_pending_revoke_and_ack {
4933                         Some(self.get_last_revoke_and_ack())
4934                 } else { None };
4935                 let commitment_update = if self.context.monitor_pending_commitment_signed {
4936                         self.get_last_commitment_update_for_send(logger).ok()
4937                 } else { None };
4938                 if commitment_update.is_some() {
4939                         self.mark_awaiting_response();
4940                 }
4941
4942                 self.context.monitor_pending_revoke_and_ack = false;
4943                 self.context.monitor_pending_commitment_signed = false;
4944                 let order = self.context.resend_order.clone();
4945                 log_debug!(logger, "Restored monitor updating in channel {} resulting in {}{} commitment update and {} RAA, with {} first",
4946                         &self.context.channel_id(), if funding_broadcastable.is_some() { "a funding broadcastable, " } else { "" },
4947                         if commitment_update.is_some() { "a" } else { "no" }, if raa.is_some() { "an" } else { "no" },
4948                         match order { RAACommitmentOrder::CommitmentFirst => "commitment", RAACommitmentOrder::RevokeAndACKFirst => "RAA"});
4949                 MonitorRestoreUpdates {
4950                         raa, commitment_update, order, accepted_htlcs, failed_htlcs, finalized_claimed_htlcs, funding_broadcastable, channel_ready, announcement_sigs
4951                 }
4952         }
4953
4954         pub fn update_fee<F: Deref, L: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::UpdateFee, logger: &L) -> Result<(), ChannelError>
4955                 where F::Target: FeeEstimator, L::Target: Logger
4956         {
4957                 if self.context.is_outbound() {
4958                         return Err(ChannelError::Close("Non-funding remote tried to update channel fee".to_owned()));
4959                 }
4960                 if self.context.channel_state.is_peer_disconnected() {
4961                         return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned()));
4962                 }
4963                 Channel::<SP>::check_remote_fee(&self.context.channel_type, fee_estimator, msg.feerate_per_kw, Some(self.context.feerate_per_kw), logger)?;
4964
4965                 self.context.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced));
4966                 self.context.update_time_counter += 1;
4967                 // Check that we won't be pushed over our dust exposure limit by the feerate increase.
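                 // (On anchor channels HTLC transactions are zero-fee, so a feerate change doesn't
                 // alter which HTLCs count as trimmed-to-dust; the check is skipped there.)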
4968                 if !self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
4969                         let inbound_stats = self.context.get_inbound_pending_htlc_stats(None);
4970                         let outbound_stats = self.context.get_outbound_pending_htlc_stats(None);
4971                         let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat;
4972                         let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat;
4973                         let max_dust_htlc_exposure_msat = self.context.get_max_dust_htlc_exposure_msat(fee_estimator);
4974                         if holder_tx_dust_exposure > max_dust_htlc_exposure_msat {
4975                                 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)",
4976                                         msg.feerate_per_kw, holder_tx_dust_exposure)));
4977                         }
4978                         if counterparty_tx_dust_exposure > max_dust_htlc_exposure_msat {
4979                                 return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)",
4980                                         msg.feerate_per_kw, counterparty_tx_dust_exposure)));
4981                         }
4982                 }
4983                 Ok(())
4984         }
4985
4986         /// Indicates that the signer may have some signatures for us, so we should retry if we're
4987         /// blocked.
4988         #[cfg(async_signing)]
4989         pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> SignerResumeUpdates where L::Target: Logger {
4990                 let commitment_update = if self.context.signer_pending_commitment_update {
4991                         self.get_last_commitment_update_for_send(logger).ok()
4992                 } else { None };
4993                 let funding_signed = if self.context.signer_pending_funding && !self.context.is_outbound() {
4994                         self.context.get_funding_signed_msg(logger).1
4995                 } else { None };
4996                 let channel_ready = if funding_signed.is_some() {
4997                         self.check_get_channel_ready(0)
4998                 } else { None };
4999
5000                 log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready",
5001                         if commitment_update.is_some() { "a" } else { "no" },
5002                         if funding_signed.is_some() { "a" } else { "no" },
5003                         if channel_ready.is_some() { "a" } else { "no" });
5004
5005                 SignerResumeUpdates {
5006                         commitment_update,
5007                         funding_signed,
5008                         channel_ready,
5009                 }
5010         }
5011
5012         fn get_last_revoke_and_ack(&self) -> msgs::RevokeAndACK {
5013                 let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
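                 // Commitment numbers count down: the secret released here revokes our previous
                 // commitment, while the point lets the peer build our next one.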
5014                 let per_commitment_secret = self.context.holder_signer.as_ref().release_commitment_secret(self.context.cur_holder_commitment_transaction_number + 2);
5015                 msgs::RevokeAndACK {
5016                         channel_id: self.context.channel_id,
5017                         per_commitment_secret,
5018                         next_per_commitment_point,
5019                         #[cfg(taproot)]
5020                         next_local_nonce: None,
5021                 }
5022         }
5023
5024         /// Gets the last commitment update for immediate sending to our peer.
5025         fn get_last_commitment_update_for_send<L: Deref>(&mut self, logger: &L) -> Result<msgs::CommitmentUpdate, ()> where L::Target: Logger {
5026                 let mut update_add_htlcs = Vec::new();
5027                 let mut update_fulfill_htlcs = Vec::new();
5028                 let mut update_fail_htlcs = Vec::new();
5029                 let mut update_fail_malformed_htlcs = Vec::new();
5030
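                 // Rebuild, purely from channel state, every update message the peer cannot have
                 // committed yet - exactly the set we need to retransmit.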
5031                 for htlc in self.context.pending_outbound_htlcs.iter() {
5032                         if let &OutboundHTLCState::LocalAnnounced(ref onion_packet) = &htlc.state {
5033                                 update_add_htlcs.push(msgs::UpdateAddHTLC {
5034                                         channel_id: self.context.channel_id(),
5035                                         htlc_id: htlc.htlc_id,
5036                                         amount_msat: htlc.amount_msat,
5037                                         payment_hash: htlc.payment_hash,
5038                                         cltv_expiry: htlc.cltv_expiry,
5039                                         onion_routing_packet: (**onion_packet).clone(),
5040                                         skimmed_fee_msat: htlc.skimmed_fee_msat,
5041                                         blinding_point: htlc.blinding_point,
5042                                 });
5043                         }
5044                 }
5045
5046                 for htlc in self.context.pending_inbound_htlcs.iter() {
5047                         if let &InboundHTLCState::LocalRemoved(ref reason) = &htlc.state {
5048                                 match reason {
5049                                         &InboundHTLCRemovalReason::FailRelay(ref err_packet) => {
5050                                                 update_fail_htlcs.push(msgs::UpdateFailHTLC {
5051                                                         channel_id: self.context.channel_id(),
5052                                                         htlc_id: htlc.htlc_id,
5053                                                         reason: err_packet.clone()
5054                                                 });
5055                                         },
5056                                         &InboundHTLCRemovalReason::FailMalformed((ref sha256_of_onion, ref failure_code)) => {
5057                                                 update_fail_malformed_htlcs.push(msgs::UpdateFailMalformedHTLC {
5058                                                         channel_id: self.context.channel_id(),
5059                                                         htlc_id: htlc.htlc_id,
5060                                                         sha256_of_onion: sha256_of_onion.clone(),
5061                                                         failure_code: failure_code.clone(),
5062                                                 });
5063                                         },
5064                                         &InboundHTLCRemovalReason::Fulfill(ref payment_preimage) => {
5065                                                 update_fulfill_htlcs.push(msgs::UpdateFulfillHTLC {
5066                                                         channel_id: self.context.channel_id(),
5067                                                         htlc_id: htlc.htlc_id,
5068                                                         payment_preimage: payment_preimage.clone(),
5069                                                 });
5070                                         },
5071                                 }
5072                         }
5073                 }
5074
5075                 let update_fee = if self.context.is_outbound() && self.context.pending_update_fee.is_some() {
5076                         Some(msgs::UpdateFee {
5077                                 channel_id: self.context.channel_id(),
5078                                 feerate_per_kw: self.context.pending_update_fee.unwrap().0,
5079                         })
5080                 } else { None };
5081
5082                 log_trace!(logger, "Regenerating latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds",
5083                                 &self.context.channel_id(), if update_fee.is_some() { " update_fee," } else { "" },
5084                                 update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len());
5085                 let commitment_signed = if let Ok(update) = self.send_commitment_no_state_update(logger).map(|(cu, _)| cu) {
5086                         if self.context.signer_pending_commitment_update {
5087                                 log_trace!(logger, "Commitment update generated: clearing signer_pending_commitment_update");
5088                                 self.context.signer_pending_commitment_update = false;
5089                         }
5090                         update
5091                 } else {
5092                         #[cfg(not(async_signing))] {
5093                                 panic!("Failed to get signature for new commitment state");
5094                         }
5095                         #[cfg(async_signing)] {
5096                                 if !self.context.signer_pending_commitment_update {
5097                                         log_trace!(logger, "Commitment update awaiting signer: setting signer_pending_commitment_update");
5098                                         self.context.signer_pending_commitment_update = true;
5099                                 }
5100                                 return Err(());
5101                         }
5102                 };
5103                 Ok(msgs::CommitmentUpdate {
5104                         update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee,
5105                         commitment_signed,
5106                 })
5107         }
5108
5109         /// Gets the `Shutdown` message we should send our peer on reconnect, if any.
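        /// A hedged usage sketch (the surrounding names are assumed for illustration):
        /// ```ignore
        /// if let Some(shutdown) = chan.get_outbound_shutdown() {
        ///     // Re-send our `shutdown` on reconnect so closing negotiation can resume.
        ///     msgs_to_send.push(shutdown);
        /// }
        /// ```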
5110         pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
5111                 if self.context.channel_state.is_local_shutdown_sent() {
5112                         assert!(self.context.shutdown_scriptpubkey.is_some());
5113                         Some(msgs::Shutdown {
5114                                 channel_id: self.context.channel_id,
5115                                 scriptpubkey: self.get_closing_scriptpubkey(),
5116                         })
5117                 } else { None }
5118         }
5119
5120         /// May panic if some calls other than message-handling calls (which will all Err immediately)
5121         /// have been called between `remove_uncommitted_htlcs_and_mark_paused` and this call.
5122         ///
5123         /// Some links printed in log lines are included here to check them during build (when run with
5124         /// `cargo doc --document-private-items`):
5125         /// [`super::channelmanager::ChannelManager::force_close_without_broadcasting_txn`] and
5126         /// [`super::channelmanager::ChannelManager::force_close_all_channels_without_broadcasting_txn`].
5127         pub fn channel_reestablish<L: Deref, NS: Deref>(
5128                 &mut self, msg: &msgs::ChannelReestablish, logger: &L, node_signer: &NS,
5129                 chain_hash: ChainHash, user_config: &UserConfig, best_block: &BestBlock
5130         ) -> Result<ReestablishResponses, ChannelError>
5131         where
5132                 L::Target: Logger,
5133                 NS::Target: NodeSigner
5134         {
5135                 if !self.context.channel_state.is_peer_disconnected() {
5136                         // While BOLT 2 doesn't explicitly say we should error the channel here, a spurious
5137                         // channel_reestablish almost certainly means we are going to end up out-of-sync in some way, so we
5138                         // just close here instead of trying to recover.
5139                         return Err(ChannelError::Close("Peer sent an unexpected channel_reestablish when we were not disconnected".to_owned()));
5140                 }
5141
5142                 if msg.next_local_commitment_number >= INITIAL_COMMITMENT_NUMBER || msg.next_remote_commitment_number >= INITIAL_COMMITMENT_NUMBER ||
5143                         msg.next_local_commitment_number == 0 {
5144                         return Err(ChannelError::Close("Peer sent an invalid channel_reestablish to force close in a non-standard way".to_owned()));
5145                 }
5146
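                // The wire message's `next_*_commitment_number` fields count *up* from 1, while our
                // internal commitment numbers count *down* from INITIAL_COMMITMENT_NUMBER, hence the
                // translations below.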
5147                 let our_commitment_transaction = INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number - 1;
5148                 if msg.next_remote_commitment_number > 0 {
5149                         let expected_point = self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - msg.next_remote_commitment_number + 1, &self.context.secp_ctx);
5150                         let given_secret = SecretKey::from_slice(&msg.your_last_per_commitment_secret)
5151                                 .map_err(|_| ChannelError::Close("Peer sent a garbage channel_reestablish with unparseable secret key".to_owned()))?;
5152                         if expected_point != PublicKey::from_secret_key(&self.context.secp_ctx, &given_secret) {
5153                                 return Err(ChannelError::Close("Peer sent a garbage channel_reestablish with secret key not matching the commitment height provided".to_owned()));
5154                         }
5155                         if msg.next_remote_commitment_number > our_commitment_transaction {
5156                                 macro_rules! log_and_panic {
5157                                         ($err_msg: expr) => {
5158                                                 log_error!(logger, $err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5159                                                 panic!($err_msg, &self.context.channel_id, log_pubkey!(self.context.counterparty_node_id));
5160                                         }
5161                                 }
5162                                 log_and_panic!("We have fallen behind - we have received proof that if we broadcast our counterparty is going to claim all our funds.\n\
5163                                         This implies you have restarted with lost ChannelMonitor and ChannelManager state, the first of which is a violation of the LDK chain::Watch requirements.\n\
5164                                         More specifically, this means you have a bug in your implementation that can cause loss of funds, or you are running with an old backup, which is unsafe.\n\
5165                                         If you have restored from an old backup and wish to force-close channels and return to operation, you should start up, call\n\
5166                                         ChannelManager::force_close_without_broadcasting_txn on channel {} with counterparty {} or\n\
5167                                         ChannelManager::force_close_all_channels_without_broadcasting_txn, then reconnect to peer(s).\n\
5168                                         Note that due to a long-standing bug in lnd you may have to reach out to peers running lnd-based nodes to ask them to manually force-close channels\n\
5169                                         See https://github.com/lightningdevkit/rust-lightning/issues/1565 for more info.");
5170                         }
5171                 }
5172
5173                 // Before we change the state of the channel, we check if the peer is sending a very old
5174                 // commitment transaction number; if so, we send a warning message.
5175                 if msg.next_remote_commitment_number + 1 < our_commitment_transaction {
5176                         return Err(ChannelError::Warn(format!(
5177                                 "Peer attempted to reestablish channel with a very old local commitment transaction: {} (received) vs {} (expected)",
5178                                 msg.next_remote_commitment_number,
5179                                 our_commitment_transaction
5180                         )));
5181                 }
5182
5183                 // Go ahead and unmark PeerDisconnected as various calls we may make check for it (and all
5184                 // remaining cases either succeed or ErrorMessage-fail).
5185                 self.context.channel_state.clear_peer_disconnected();
5186                 self.context.sent_message_awaiting_response = None;
5187
5188                 let shutdown_msg = self.get_outbound_shutdown();
5189
5190                 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, best_block.height(), logger);
5191
5192                 if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(_)) {
5193                         // If we're waiting on a monitor update, we shouldn't re-send any channel_ready's.
5194                         if !self.context.channel_state.is_our_channel_ready() ||
5195                                         self.context.channel_state.is_monitor_update_in_progress() {
5196                                 if msg.next_remote_commitment_number != 0 {
5197                                         return Err(ChannelError::Close("Peer claimed they saw a revoke_and_ack but we haven't sent channel_ready yet".to_owned()));
5198                                 }
5199                                 // Short circuit the whole handler as there is nothing we can resend them
5200                                 return Ok(ReestablishResponses {
5201                                         channel_ready: None,
5202                                         raa: None, commitment_update: None,
5203                                         order: RAACommitmentOrder::CommitmentFirst,
5204                                         shutdown_msg, announcement_sigs,
5205                                 });
5206                         }
5207
5208                         // We have OurChannelReady set!
5209                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5210                         return Ok(ReestablishResponses {
5211                                 channel_ready: Some(msgs::ChannelReady {
5212                                         channel_id: self.context.channel_id(),
5213                                         next_per_commitment_point,
5214                                         short_channel_id_alias: Some(self.context.outbound_scid_alias),
5215                                 }),
5216                                 raa: None, commitment_update: None,
5217                                 order: RAACommitmentOrder::CommitmentFirst,
5218                                 shutdown_msg, announcement_sigs,
5219                         });
5220                 }
5221
5222                 let required_revoke = if msg.next_remote_commitment_number == our_commitment_transaction {
5223                         // Remote isn't waiting on any RevokeAndACK from us!
5224                         // Note that if we need to repeat our ChannelReady we'll do that in the next if block.
5225                         None
5226                 } else if msg.next_remote_commitment_number + 1 == our_commitment_transaction {
5227                         if self.context.channel_state.is_monitor_update_in_progress() {
5228                                 self.context.monitor_pending_revoke_and_ack = true;
5229                                 None
5230                         } else {
5231                                 Some(self.get_last_revoke_and_ack())
5232                         }
5233                 } else {
5234                         debug_assert!(false, "All values should have been handled in the four cases above");
5235                         return Err(ChannelError::Close(format!(
5236                                 "Peer attempted to reestablish channel expecting a future local commitment transaction: {} (received) vs {} (expected)",
5237                                 msg.next_remote_commitment_number,
5238                                 our_commitment_transaction
5239                         )));
5240                 };
5241
5242                 // We increment cur_counterparty_commitment_transaction_number only upon receipt of
5243                 // revoke_and_ack, not on sending commitment_signed, so we add one if we have
5244                 // AwaitingRemoteRevoke set, which indicates we sent a commitment_signed but haven't gotten
5245                 // the corresponding revoke_and_ack back yet.
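                // In other words, a commitment_signed we already sent still advanced the
                // counterparty's state even though their revoke_and_ack hasn't arrived, so it must
                // count toward the expected `next_local_commitment_number`.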
5246                 let is_awaiting_remote_revoke = self.context.channel_state.is_awaiting_remote_revoke();
5247                 if is_awaiting_remote_revoke && !self.is_awaiting_monitor_update() {
5248                         self.mark_awaiting_response();
5249                 }
5250                 let next_counterparty_commitment_number = INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number + if is_awaiting_remote_revoke { 1 } else { 0 };
5251
5252                 let channel_ready = if msg.next_local_commitment_number == 1 && INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number == 1 {
5253                         // We should never have to worry about MonitorUpdateInProgress resending ChannelReady
5254                         let next_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
5255                         Some(msgs::ChannelReady {
5256                                 channel_id: self.context.channel_id(),
5257                                 next_per_commitment_point,
5258                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
5259                         })
5260                 } else { None };
5261
5262                 if msg.next_local_commitment_number == next_counterparty_commitment_number {
5263                         if required_revoke.is_some() {
5264                                 log_debug!(logger, "Reconnected channel {} with only lost outbound RAA", &self.context.channel_id());
5265                         } else {
5266                                 log_debug!(logger, "Reconnected channel {} with no loss", &self.context.channel_id());
5267                         }
5268
5269                         Ok(ReestablishResponses {
5270                                 channel_ready, shutdown_msg, announcement_sigs,
5271                                 raa: required_revoke,
5272                                 commitment_update: None,
5273                                 order: self.context.resend_order.clone(),
5274                         })
5275                 } else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
5276                         if required_revoke.is_some() {
5277                                 log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", &self.context.channel_id());
5278                         } else {
5279                                 log_debug!(logger, "Reconnected channel {} with only lost remote commitment tx", &self.context.channel_id());
5280                         }
5281
5282                         if self.context.channel_state.is_monitor_update_in_progress() {
5283                                 self.context.monitor_pending_commitment_signed = true;
5284                                 Ok(ReestablishResponses {
5285                                         channel_ready, shutdown_msg, announcement_sigs,
5286                                         commitment_update: None, raa: None,
5287                                         order: self.context.resend_order.clone(),
5288                                 })
5289                         } else {
5290                                 Ok(ReestablishResponses {
5291                                         channel_ready, shutdown_msg, announcement_sigs,
5292                                         raa: required_revoke,
5293                                         commitment_update: self.get_last_commitment_update_for_send(logger).ok(),
5294                                         order: self.context.resend_order.clone(),
5295                                 })
5296                         }
5297                 } else if msg.next_local_commitment_number < next_counterparty_commitment_number {
5298                         Err(ChannelError::Close(format!(
5299                                 "Peer attempted to reestablish channel with a very old remote commitment transaction: {} (received) vs {} (expected)",
5300                                 msg.next_local_commitment_number,
5301                                 next_counterparty_commitment_number,
5302                         )))
5303                 } else {
5304                         Err(ChannelError::Close(format!(
5305                                 "Peer attempted to reestablish channel with a future remote commitment transaction: {} (received) vs {} (expected)",
5306                                 msg.next_local_commitment_number,
5307                                 next_counterparty_commitment_number,
5308                         )))
5309                 }
5310         }
5311
5312         /// Calculates and returns our minimum and maximum closing transaction fee amounts, in whole
5313         /// satoshis. The amounts remain consistent unless a peer disconnects/reconnects or we restart,
5314         /// at which point they will be recalculated.
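        /// As a rough worked example of the proposal below (illustrative numbers only): at a
        /// minimum feerate of 253 sat/kW and a closing-transaction weight of 700, the minimum
        /// fee would be 253 * 700 / 1000 = 177 sats.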
5315         fn calculate_closing_fee_limits<F: Deref>(&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>)
5316                 -> (u64, u64)
5317                 where F::Target: FeeEstimator
5318         {
5319                 if let Some((min, max)) = self.context.closing_fee_limits { return (min, max); }
5320
5321                 // Propose a range from our ChannelCloseMinimum feerate to our NonAnchorChannelFee
5322                 // feerate plus our force_close_avoidance_max_fee_satoshis.
5323                 // If we fail to come to consensus, we'll have to force-close.
5324                 let mut proposed_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::ChannelCloseMinimum);
5325                 // Use NonAnchorChannelFee because this should be an estimate for a channel close
5326                 // that we don't expect to need fee bumping
5327                 let normal_feerate = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
5328                 let mut proposed_max_feerate = if self.context.is_outbound() { normal_feerate } else { u32::max_value() };
5329
5330                 // The spec requires that (when the channel does not have anchors) we only send absolute
5331                 // closing fees no greater than the absolute fee on the current commitment
5332                 // transaction. It's unclear *which* commitment transaction this refers to, and there isn't a
5333                 // very good reason to apply such a limit in any case. We don't bother doing so, risking
5334                 // some force-closure by old nodes, but we want to close the channel anyway.
5335
5336                 if let Some(target_feerate) = self.context.target_closing_feerate_sats_per_kw {
5337                         let min_feerate = if self.context.is_outbound() { target_feerate } else { cmp::min(self.context.feerate_per_kw, target_feerate) };
5338                         proposed_feerate = cmp::max(proposed_feerate, min_feerate);
5339                         proposed_max_feerate = cmp::max(proposed_max_feerate, min_feerate);
5340                 }
5341
5342                 // Note that technically we could end up with a lower minimum fee if one side's balance is
5343                 // below our dust limit, causing the output to disappear. We don't bother handling this
5344                 // case, however, as this should only happen if a channel is closed before any (material)
5345                 // payments have been made on it. This may cause slight fee overpayment and/or failure to
5346                 // come to consensus with our counterparty on appropriate fees; however, it should be a
5347                 // relatively rare case. We can revisit this later, though note that in order to determine
5348                 // if the funder's output is dust we have to know the absolute fee we're going to use.
5349                 let tx_weight = self.get_closing_transaction_weight(Some(&self.get_closing_scriptpubkey()), Some(self.context.counterparty_shutdown_scriptpubkey.as_ref().unwrap()));
5350                 let proposed_total_fee_satoshis = proposed_feerate as u64 * tx_weight / 1000;
5351                 let proposed_max_total_fee_satoshis = if self.context.is_outbound() {
5352                                 // We always add force_close_avoidance_max_fee_satoshis to our normal
5353                                 // feerate-calculated fee, but allow the max to be overridden if we're using a
5354                                 // target feerate-calculated fee.
5355                                 cmp::max(normal_feerate as u64 * tx_weight / 1000 + self.context.config.options.force_close_avoidance_max_fee_satoshis,
5356                                         proposed_max_feerate as u64 * tx_weight / 1000)
5357                         } else {
5358                                 self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000
5359                         };
5360
5361                 self.context.closing_fee_limits = Some((proposed_total_fee_satoshis, proposed_max_total_fee_satoshis));
5362                 (proposed_total_fee_satoshis, proposed_max_total_fee_satoshis)
5363         }
5364
5365         /// Returns true if we're ready to commence the closing_signed negotiation phase. This is true
5366         /// after both sides have exchanged a `shutdown` message and all HTLCs have been drained. At
5367         /// this point if we're the funder we should send the initial closing_signed, and in any case
5368         /// shutdown should complete within a reasonable timeframe.
5369         fn closing_negotiation_ready(&self) -> bool {
5370                 self.context.closing_negotiation_ready()
5371         }
5372
5373         /// Checks if the closing_signed negotiation is making appropriate progress, possibly returning
5374         /// an Err if no progress is being made and the channel should be force-closed instead.
5375         /// Should be called on a one-minute timer.
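        /// A hedged sketch of the intended timer-driven usage (assumed caller names):
        /// ```ignore
        /// // Roughly once per minute, per channel:
        /// if let Err(e) = chan.timer_check_closing_negotiation_progress() {
        ///     force_close(chan, e); // negotiation stalled for two timer ticks
        /// }
        /// ```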
5376         pub fn timer_check_closing_negotiation_progress(&mut self) -> Result<(), ChannelError> {
5377                 if self.closing_negotiation_ready() {
5378                         if self.context.closing_signed_in_flight {
5379                                 return Err(ChannelError::Close("closing_signed negotiation failed to finish within two timer ticks".to_owned()));
5380                         } else {
5381                                 self.context.closing_signed_in_flight = true;
5382                         }
5383                 }
5384                 Ok(())
5385         }
5386
5387         pub fn maybe_propose_closing_signed<F: Deref, L: Deref>(
5388                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L)
5389                 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5390                 where F::Target: FeeEstimator, L::Target: Logger
5391         {
5392                 // If we're waiting on a monitor persistence, that implies we're also waiting to send some
5393                 // message to our counterparty (probably a `revoke_and_ack`). In such a case, we shouldn't
5394                 // initiate `closing_signed` negotiation until we're clear of all pending messages. Note
5395                 // that closing_negotiation_ready checks this case (as well as a few others).
5396                 if self.context.last_sent_closing_fee.is_some() || !self.closing_negotiation_ready() {
5397                         return Ok((None, None, None));
5398                 }
5399
5400                 if !self.context.is_outbound() {
5401                         if let Some(msg) = &self.context.pending_counterparty_closing_signed.take() {
5402                                 return self.closing_signed(fee_estimator, &msg);
5403                         }
5404                         return Ok((None, None, None));
5405                 }
5406
5407                 // If we're waiting on a counterparty `commitment_signed` to clear some updates from our
5408                 // local commitment transaction, we can't yet initiate `closing_signed` negotiation.
5409                 if self.context.expecting_peer_commitment_signed {
5410                         return Ok((None, None, None));
5411                 }
5412
5413                 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5414
5415                 assert!(self.context.shutdown_scriptpubkey.is_some());
5416                 let (closing_tx, total_fee_satoshis) = self.build_closing_transaction(our_min_fee, false);
5417                 log_trace!(logger, "Proposing initial closing_signed for our counterparty with a fee range of {}-{} sat (with initial proposal {} sats)",
5418                         our_min_fee, our_max_fee, total_fee_satoshis);
5419
5420                 match &self.context.holder_signer {
5421                         ChannelSignerType::Ecdsa(ecdsa) => {
5422                                 let sig = ecdsa
5423                                         .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5424                                         .map_err(|()| ChannelError::Close("Failed to get signature for closing transaction.".to_owned()))?;
5425
5426                                 self.context.last_sent_closing_fee = Some((total_fee_satoshis, sig.clone()));
5427                                 Ok((Some(msgs::ClosingSigned {
5428                                         channel_id: self.context.channel_id,
5429                                         fee_satoshis: total_fee_satoshis,
5430                                         signature: sig,
5431                                         fee_range: Some(msgs::ClosingSignedFeeRange {
5432                                                 min_fee_satoshis: our_min_fee,
5433                                                 max_fee_satoshis: our_max_fee,
5434                                         }),
5435                                 }), None, None))
5436                         },
5437                         // TODO (taproot|arik)
5438                         #[cfg(taproot)]
5439                         _ => todo!()
5440                 }
5441         }
5442
5443         // Marks a channel as waiting for a response from the counterparty. If it's not received
5444         // [`DISCONNECT_PEER_AWAITING_RESPONSE_TICKS`] after sending our own to them, then we'll attempt
5445         // a reconnection.
5446         fn mark_awaiting_response(&mut self) {
5447                 self.context.sent_message_awaiting_response = Some(0);
5448         }
5449
5450         /// Determines whether we should disconnect the counterparty due to not receiving a response
5451         /// within our expected timeframe.
5452         ///
5453         /// This should be called on every [`super::channelmanager::ChannelManager::timer_tick_occurred`].
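        /// A hedged sketch of the expected call pattern (assumed caller names):
        /// ```ignore
        /// // From the once-per-tick timer handler:
        /// if chan.should_disconnect_peer_awaiting_response() {
        ///     disconnect_and_reconnect(counterparty_node_id);
        /// }
        /// ```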
5454         pub fn should_disconnect_peer_awaiting_response(&mut self) -> bool {
5455                 let ticks_elapsed = if let Some(ticks_elapsed) = self.context.sent_message_awaiting_response.as_mut() {
5456                         ticks_elapsed
5457                 } else {
5458                         // Don't disconnect when we're not waiting on a response.
5459                         return false;
5460                 };
5461                 *ticks_elapsed += 1;
5462                 *ticks_elapsed >= DISCONNECT_PEER_AWAITING_RESPONSE_TICKS
5463         }
5464
5465         pub fn shutdown(
5466                 &mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
5467         ) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
5468         {
5469                 if self.context.channel_state.is_peer_disconnected() {
5470                         return Err(ChannelError::Close("Peer sent shutdown when we needed a channel_reestablish".to_owned()));
5471                 }
5472                 if self.context.channel_state.is_pre_funded_state() {
5473                         // The spec says we should fail the connection, not the channel, but that's nonsense: there
5474                         // are plenty of reasons you may want to fail a channel pre-funding, and the spec says you
5475                         // can do that via an error message without getting a connection failure anyway...
5476                         return Err(ChannelError::Close("Peer sent shutdown pre-funding generation".to_owned()));
5477                 }
5478                 for htlc in self.context.pending_inbound_htlcs.iter() {
5479                         if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
5480                                 return Err(ChannelError::Close("Got shutdown with remote pending HTLCs".to_owned()));
5481                         }
5482                 }
5483                 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
5484
5485                 if !script::is_bolt2_compliant(&msg.scriptpubkey, their_features) {
5486                         return Err(ChannelError::Warn(format!("Got a nonstandard scriptpubkey ({}) from remote peer", msg.scriptpubkey.to_hex_string())));
5487                 }
5488
5489                 if self.context.counterparty_shutdown_scriptpubkey.is_some() {
5490                         if Some(&msg.scriptpubkey) != self.context.counterparty_shutdown_scriptpubkey.as_ref() {
5491                                 return Err(ChannelError::Warn(format!("Got shutdown request with a scriptpubkey ({}) which did not match their previous scriptpubkey.", msg.scriptpubkey.to_hex_string())));
5492                         }
5493                 } else {
5494                         self.context.counterparty_shutdown_scriptpubkey = Some(msg.scriptpubkey.clone());
5495                 }
5496
5497                 // If we have any LocalAnnounced updates we'll probably just get back an update_fail_htlc
5498                 // immediately after the commitment dance, but we can send a Shutdown because we won't send
5499                 // any further commitment updates after we set LocalShutdownSent.
5500                 let send_shutdown = !self.context.channel_state.is_local_shutdown_sent();
5501
5502                 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
5503                         Some(_) => false,
5504                         None => {
5505                                 assert!(send_shutdown);
5506                                 let shutdown_scriptpubkey = match signer_provider.get_shutdown_scriptpubkey() {
5507                                         Ok(scriptpubkey) => scriptpubkey,
5508                                         Err(_) => return Err(ChannelError::Close("Failed to get shutdown scriptpubkey".to_owned())),
5509                                 };
5510                                 if !shutdown_scriptpubkey.is_compatible(their_features) {
5511                                         return Err(ChannelError::Close(format!("Provided a scriptpubkey format not accepted by peer: {}", shutdown_scriptpubkey)));
5512                                 }
5513                                 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
5514                                 true
5515                         },
5516                 };
5517
5518                 // From here on out, we may not fail!
5519
5520                 self.context.channel_state.set_remote_shutdown_sent();
5521                 self.context.update_time_counter += 1;
5522
5523                 let monitor_update = if update_shutdown_script {
5524                         self.context.latest_monitor_update_id += 1;
5525                         let monitor_update = ChannelMonitorUpdate {
5526                                 update_id: self.context.latest_monitor_update_id,
5527                                 counterparty_node_id: Some(self.context.counterparty_node_id),
5528                                 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
5529                                         scriptpubkey: self.get_closing_scriptpubkey(),
5530                                 }],
5531                                 channel_id: Some(self.context.channel_id()),
5532                         };
5533                         self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
5534                         self.push_ret_blockable_mon_update(monitor_update)
5535                 } else { None };
5536                 let shutdown = if send_shutdown {
5537                         Some(msgs::Shutdown {
5538                                 channel_id: self.context.channel_id,
5539                                 scriptpubkey: self.get_closing_scriptpubkey(),
5540                         })
5541                 } else { None };
5542
5543                 // We can't send our shutdown until we've committed all of our pending HTLCs, but the
5544                 // remote side is unlikely to accept any new HTLCs, so we go ahead and "free" any holding
5545                 // cell HTLCs and return them to fail the payment.
5546                 self.context.holding_cell_update_fee = None;
5547                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
5548                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
5549                         match htlc_update {
5550                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
5551                                         dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
5552                                         false
5553                                 },
5554                                 _ => true
5555                         }
5556                 });
5557
5558                 self.context.channel_state.set_local_shutdown_sent();
5559                 self.context.update_time_counter += 1;
5560
5561                 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
5562         }
5563
5564         fn build_signed_closing_transaction(&self, closing_tx: &ClosingTransaction, counterparty_sig: &Signature, sig: &Signature) -> Transaction {
5565                 let mut tx = closing_tx.trust().built_transaction().clone();
5566
5567                 tx.input[0].witness.push(Vec::new()); // First is the multisig dummy
5568
5569                 let funding_key = self.context.get_holder_pubkeys().funding_pubkey.serialize();
5570                 let counterparty_funding_key = self.context.counterparty_funding_pubkey().serialize();
5571                 let mut holder_sig = sig.serialize_der().to_vec();
5572                 holder_sig.push(EcdsaSighashType::All as u8);
5573                 let mut cp_sig = counterparty_sig.serialize_der().to_vec();
5574                 cp_sig.push(EcdsaSighashType::All as u8);
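                // BOLT 3 sorts the two funding pubkeys lexicographically in the 2-of-2 multisig,
                // and the witness signatures must appear in the same order as their keys, so
                // compare the serialized keys to decide which signature comes first.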
5575                 if funding_key[..] < counterparty_funding_key[..] {
5576                         tx.input[0].witness.push(holder_sig);
5577                         tx.input[0].witness.push(cp_sig);
5578                 } else {
5579                         tx.input[0].witness.push(cp_sig);
5580                         tx.input[0].witness.push(holder_sig);
5581                 }
5582
5583                 tx.input[0].witness.push(self.context.get_funding_redeemscript().into_bytes());
5584                 tx
5585         }
5586
5587         pub fn closing_signed<F: Deref>(
5588                 &mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned)
5589                 -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
5590                 where F::Target: FeeEstimator
5591         {
5592                 if !self.context.channel_state.is_both_sides_shutdown() {
5593                         return Err(ChannelError::Close("Remote end sent us a closing_signed before both sides provided a shutdown".to_owned()));
5594                 }
5595                 if self.context.channel_state.is_peer_disconnected() {
5596                         return Err(ChannelError::Close("Peer sent closing_signed when we needed a channel_reestablish".to_owned()));
5597                 }
5598                 if !self.context.pending_inbound_htlcs.is_empty() || !self.context.pending_outbound_htlcs.is_empty() {
5599                         return Err(ChannelError::Close("Remote end sent us a closing_signed while there were still pending HTLCs".to_owned()));
5600                 }
5601                 if msg.fee_satoshis > TOTAL_BITCOIN_SUPPLY_SATOSHIS { // this is required to stop potential overflow in build_closing_transaction
5602                         return Err(ChannelError::Close("Remote tried to send us a closing tx with > 21 million BTC fee".to_owned()));
5603                 }
5604
5605                 if self.context.is_outbound() && self.context.last_sent_closing_fee.is_none() {
5606                         return Err(ChannelError::Close("Remote tried to send a closing_signed when we were supposed to propose the first one".to_owned()));
5607                 }
5608
5609                 if self.context.channel_state.is_monitor_update_in_progress() {
5610                         self.context.pending_counterparty_closing_signed = Some(msg.clone());
5611                         return Ok((None, None, None));
5612                 }
5613
5614                 let funding_redeemscript = self.context.get_funding_redeemscript();
5615                 let (mut closing_tx, used_total_fee) = self.build_closing_transaction(msg.fee_satoshis, false);
5616                 if used_total_fee != msg.fee_satoshis {
5617                         return Err(ChannelError::Close(format!("Remote sent us a closing_signed with a fee other than the value they can claim. Fee in message: {}. Actual closing tx fee: {}", msg.fee_satoshis, used_total_fee)));
5618                 }
5619                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5620
5621                 match self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
5622                         Ok(_) => {},
5623                         Err(_e) => {
5624                                 // The remote end may have decided to revoke their output due to inconsistent dust
5625                                 // limits, so check for that case by re-checking the signature here.
5626                                 closing_tx = self.build_closing_transaction(msg.fee_satoshis, true).0;
5627                                 let sighash = closing_tx.trust().get_sighash_all(&funding_redeemscript, self.context.channel_value_satoshis);
5628                                 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, self.context.counterparty_funding_pubkey()), "Invalid closing tx signature from peer".to_owned());
5629                         },
5630                 };
5631
5632                 for outp in closing_tx.trust().built_transaction().output.iter() {
5633                         if !outp.script_pubkey.is_witness_program() && outp.value < MAX_STD_OUTPUT_DUST_LIMIT_SATOSHIS {
5634                                 return Err(ChannelError::Close("Remote sent us a closing_signed with a dust output. Always use segwit closing scripts!".to_owned()));
5635                         }
5636                 }
5637
5638                 let closure_reason = if self.initiated_shutdown() {
5639                         ClosureReason::LocallyInitiatedCooperativeClosure
5640                 } else {
5641                         ClosureReason::CounterpartyInitiatedCooperativeClosure
5642                 };
5643
5644                 assert!(self.context.shutdown_scriptpubkey.is_some());
5645                 if let Some((last_fee, sig)) = self.context.last_sent_closing_fee {
5646                         if last_fee == msg.fee_satoshis {
5647                                 let shutdown_result = ShutdownResult {
5648                                         closure_reason,
5649                                         monitor_update: None,
5650                                         dropped_outbound_htlcs: Vec::new(),
5651                                         unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5652                                         channel_id: self.context.channel_id,
5653                                         user_channel_id: self.context.user_id,
5654                                         channel_capacity_satoshis: self.context.channel_value_satoshis,
5655                                         counterparty_node_id: self.context.counterparty_node_id,
5656                                         unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5657                                         channel_funding_txo: self.context.get_funding_txo(),
5658                                 };
5659                                 let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
5660                                 self.context.channel_state = ChannelState::ShutdownComplete;
5661                                 self.context.update_time_counter += 1;
5662                                 return Ok((None, Some(tx), Some(shutdown_result)));
5663                         }
5664                 }
5665
5666                 let (our_min_fee, our_max_fee) = self.calculate_closing_fee_limits(fee_estimator);
5667
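                // propose_fee! signs a closing transaction at `$new_fee` and returns early; when
                // `$new_fee` matches the peer's proposal, negotiation has converged and we also
                // return the fully-signed transaction plus a `ShutdownResult`.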
5668                 macro_rules! propose_fee {
5669                         ($new_fee: expr) => {
5670                                 let (closing_tx, used_fee) = if $new_fee == msg.fee_satoshis {
5671                                         (closing_tx, $new_fee)
5672                                 } else {
5673                                         self.build_closing_transaction($new_fee, false)
5674                                 };
5675
5676                                 return match &self.context.holder_signer {
5677                                         ChannelSignerType::Ecdsa(ecdsa) => {
5678                                                 let sig = ecdsa
5679                                                         .sign_closing_transaction(&closing_tx, &self.context.secp_ctx)
5680                                                         .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?;
5681                                                 let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis {
5682                                                         let shutdown_result = ShutdownResult {
5683                                                                 closure_reason,
5684                                                                 monitor_update: None,
5685                                                                 dropped_outbound_htlcs: Vec::new(),
5686                                                                 unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(),
5687                                                                 channel_id: self.context.channel_id,
5688                                                                 user_channel_id: self.context.user_id,
5689                                                                 channel_capacity_satoshis: self.context.channel_value_satoshis,
5690                                                                 counterparty_node_id: self.context.counterparty_node_id,
5691                                                                 unbroadcasted_funding_tx: self.context.unbroadcasted_funding(),
5692                                                                 channel_funding_txo: self.context.get_funding_txo(),
5693                                                         };
5694                                                         self.context.channel_state = ChannelState::ShutdownComplete;
5695                                                         self.context.update_time_counter += 1;
5696                                                         let tx = self.build_signed_closing_transaction(&closing_tx, &msg.signature, &sig);
5697                                                         (Some(tx), Some(shutdown_result))
5698                                                 } else {
5699                                                         (None, None)
5700                                                 };
5701
5702                                                 self.context.last_sent_closing_fee = Some((used_fee, sig.clone()));
5703                                                 Ok((Some(msgs::ClosingSigned {
5704                                                         channel_id: self.context.channel_id,
5705                                                         fee_satoshis: used_fee,
5706                                                         signature: sig,
5707                                                         fee_range: Some(msgs::ClosingSignedFeeRange {
5708                                                                 min_fee_satoshis: our_min_fee,
5709                                                                 max_fee_satoshis: our_max_fee,
5710                                                         }),
5711                                                 }), signed_tx, shutdown_result))
5712                                         },
5713                                         // TODO (taproot|arik)
5714                                         #[cfg(taproot)]
5715                                         _ => todo!()
5716                                 }
5717                         }
5718                 }
5719
5720                 if let Some(msgs::ClosingSignedFeeRange { min_fee_satoshis, max_fee_satoshis }) = msg.fee_range {
5721                         if msg.fee_satoshis < min_fee_satoshis || msg.fee_satoshis > max_fee_satoshis {
5722                                 return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in their desired range of {} sat - {} sat", msg.fee_satoshis, min_fee_satoshis, max_fee_satoshis)));
5723                         }
5724                         if max_fee_satoshis < our_min_fee {
5725                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's max fee ({} sat) was smaller than our min fee ({} sat)", max_fee_satoshis, our_min_fee)));
5726                         }
5727                         if min_fee_satoshis > our_max_fee {
5728                                 return Err(ChannelError::Warn(format!("Unable to come to consensus about closing feerate, remote's min fee ({} sat) was greater than our max fee ({} sat)", min_fee_satoshis, our_max_fee)));
5729                         }
5730
5731                         if !self.context.is_outbound() {
5732                                 // They have to pay, so pick the highest fee in the overlapping range.
5733                                 // We should never set an upper bound aside from their full balance.
5734                                 debug_assert_eq!(our_max_fee, self.context.channel_value_satoshis - (self.context.value_to_self_msat + 999) / 1000);
5735                                 propose_fee!(cmp::min(max_fee_satoshis, our_max_fee));
5736                         } else {
5737                                 if msg.fee_satoshis < our_min_fee || msg.fee_satoshis > our_max_fee {
5738                                         return Err(ChannelError::Close(format!("Peer sent a bogus closing_signed - suggested fee of {} sat was not in our desired range of {} sat - {} sat after we informed them of our range.",
5739                                                 msg.fee_satoshis, our_min_fee, our_max_fee)));
5740                                 }
5741                                 // The proposed fee is in our acceptable range, accept it and broadcast!
5742                                 propose_fee!(msg.fee_satoshis);
5743                         }
5744                 } else {
5745                 // Old fee style negotiation. We don't bother to enforce whether they are complying
5746                 // with the "making progress" requirements; we just comply and hope for the best.
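                // As an illustration of the convergence rules below (numbers invented): if our
                // range is [500, 800] sats, we last sent 600, and the peer now proposes 1_000
                // (above our max), we counter with our_max_fee (800) rather than failing.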
5747                         if let Some((last_fee, _)) = self.context.last_sent_closing_fee {
5748                                 if msg.fee_satoshis > last_fee {
5749                                         if msg.fee_satoshis < our_max_fee {
5750                                                 propose_fee!(msg.fee_satoshis);
5751                                         } else if last_fee < our_max_fee {
5752                                                 propose_fee!(our_max_fee);
5753                                         } else {
5754                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) higher than our max fee ({} sat)", msg.fee_satoshis, our_max_fee)));
5755                                         }
5756                                 } else {
5757                                         if msg.fee_satoshis > our_min_fee {
5758                                                 propose_fee!(msg.fee_satoshis);
5759                                         } else if last_fee > our_min_fee {
5760                                                 propose_fee!(our_min_fee);
5761                                         } else {
5762                                                 return Err(ChannelError::Close(format!("Unable to come to consensus about closing feerate, remote wants something ({} sat) lower than our min fee ({} sat)", msg.fee_satoshis, our_min_fee)));
5763                                         }
5764                                 }
5765                         } else {
5766                                 if msg.fee_satoshis < our_min_fee {
5767                                         propose_fee!(our_min_fee);
5768                                 } else if msg.fee_satoshis > our_max_fee {
5769                                         propose_fee!(our_max_fee);
5770                                 } else {
5771                                         propose_fee!(msg.fee_satoshis);
5772                                 }
5773                         }
5774                 }
5775         }
5776
5777         fn internal_htlc_satisfies_config(
5778                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32, config: &ChannelConfig,
5779         ) -> Result<(), (&'static str, u16)> {
5780                 let fee = amt_to_forward.checked_mul(config.forwarding_fee_proportional_millionths as u64)
5781                         .and_then(|prop_fee| (prop_fee / 1000000).checked_add(config.forwarding_fee_base_msat as u64));
5782                 if fee.is_none() || htlc.amount_msat < fee.unwrap() ||
5783                         (htlc.amount_msat - fee.unwrap()) < amt_to_forward {
5784                         return Err((
5785                                 "Prior hop has deviated from specified fee parameters or origin node has obsolete ones",
5786                                 0x1000 | 12, // fee_insufficient
5787                         ));
5788                 }
5789                 if (htlc.cltv_expiry as u64) < outgoing_cltv_value as u64 + config.cltv_expiry_delta as u64 {
5790                         return Err((
5791                                 "Forwarding node has tampered with the intended HTLC values or origin node has an obsolete cltv_expiry_delta",
5792                                 0x1000 | 13, // incorrect_cltv_expiry
5793                         ));
5794                 }
5795                 Ok(())
5796         }
5797
5798         /// Determines whether the parameters of an incoming HTLC to be forwarded satisfy the channel's
5799         /// [`ChannelConfig`]. This first looks at the channel's current [`ChannelConfig`], and if
5800         /// unsuccessful, falls back to the previous one if one exists.
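        /// As a hedged worked example of the underlying check (illustrative numbers only): with
        /// `forwarding_fee_base_msat = 1_000`, `forwarding_fee_proportional_millionths = 100`,
        /// and `amt_to_forward = 1_000_000` msat, the inbound HTLC must carry at least
        /// 1_000_000 + 1_000 + 1_000_000 * 100 / 1_000_000 = 1_001_100 msat.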
5801         pub fn htlc_satisfies_config(
5802                 &self, htlc: &msgs::UpdateAddHTLC, amt_to_forward: u64, outgoing_cltv_value: u32,
5803         ) -> Result<(), (&'static str, u16)> {
5804                 self.internal_htlc_satisfies_config(&htlc, amt_to_forward, outgoing_cltv_value, &self.context.config())
5805                         .or_else(|err| {
5806                                 if let Some(prev_config) = self.context.prev_config() {
5807                                         self.internal_htlc_satisfies_config(htlc, amt_to_forward, outgoing_cltv_value, &prev_config)
5808                                 } else {
5809                                         Err(err)
5810                                 }
5811                         })
5812         }
5813
5814         pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {
5815                 self.context.cur_holder_commitment_transaction_number + 1
5816         }
5817
5818         pub fn get_cur_counterparty_commitment_transaction_number(&self) -> u64 {
5819                 self.context.cur_counterparty_commitment_transaction_number + 1 - if self.context.channel_state.is_awaiting_remote_revoke() { 1 } else { 0 }
5820         }
5821
5822         pub fn get_revoked_counterparty_commitment_transaction_number(&self) -> u64 {
5823                 self.context.cur_counterparty_commitment_transaction_number + 2
5824         }
5825
5826         #[cfg(test)]
5827         pub fn get_signer(&self) -> &ChannelSignerType<SP> {
5828                 &self.context.holder_signer
5829         }
5830
5831         #[cfg(test)]
5832         pub fn get_value_stat(&self) -> ChannelValueStat {
5833                 ChannelValueStat {
5834                         value_to_self_msat: self.context.value_to_self_msat,
5835                         channel_value_msat: self.context.channel_value_satoshis * 1000,
5836                         channel_reserve_msat: self.context.counterparty_selected_channel_reserve_satoshis.unwrap() * 1000,
5837                         pending_outbound_htlcs_amount_msat: self.context.pending_outbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5838                         pending_inbound_htlcs_amount_msat: self.context.pending_inbound_htlcs.iter().map(|ref h| h.amount_msat).sum::<u64>(),
5839                         holding_cell_outbound_amount_msat: {
5840                                 let mut res = 0;
5841                                 for h in self.context.holding_cell_htlc_updates.iter() {
5842                                         match h {
5843                                                 &HTLCUpdateAwaitingACK::AddHTLC{amount_msat, .. } => {
5844                                                         res += amount_msat;
5845                                                 }
5846                                                 _ => {}
5847                                         }
5848                                 }
5849                                 res
5850                         },
5851                         counterparty_max_htlc_value_in_flight_msat: self.context.counterparty_max_htlc_value_in_flight_msat,
5852                         counterparty_dust_limit_msat: self.context.counterparty_dust_limit_satoshis * 1000,
5853                 }
5854         }
5855
5856         /// Returns true if this channel has been marked as awaiting a monitor update to move forward.
5857         /// Allowed in any state (including after shutdown)
5858         pub fn is_awaiting_monitor_update(&self) -> bool {
5859                 self.context.channel_state.is_monitor_update_in_progress()
5860         }
5861
5862         /// Gets the latest [`ChannelMonitorUpdate`] ID which has been released and is in-flight.
5863         pub fn get_latest_unblocked_monitor_update_id(&self) -> u64 {
5864                 if self.context.blocked_monitor_updates.is_empty() { return self.context.get_latest_monitor_update_id(); }
5865                 self.context.blocked_monitor_updates[0].update.update_id - 1
5866         }
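        // For example (hypothetical IDs): if monitor updates 1..=4 have been released and
        // updates 5 and 6 are queued in blocked_monitor_updates, this returns 5 - 1 = 4,
        // the last update the user may already be persisting.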
5867
5868         /// Returns the next blocked monitor update, if one exists, and a bool indicating whether a
5869         /// further blocked monitor update exists after it.
5870         pub fn unblock_next_blocked_monitor_update(&mut self) -> Option<(ChannelMonitorUpdate, bool)> {
5871                 if self.context.blocked_monitor_updates.is_empty() { return None; }
5872                 Some((self.context.blocked_monitor_updates.remove(0).update,
5873                         !self.context.blocked_monitor_updates.is_empty()))
5874         }
5875
5876         /// Pushes a new monitor update into our monitor update queue, returning it if it should be
5877         /// immediately given to the user for persisting or `None` if it should be held as blocked.
5878         fn push_ret_blockable_mon_update(&mut self, update: ChannelMonitorUpdate)
5879         -> Option<ChannelMonitorUpdate> {
5880                 let release_monitor = self.context.blocked_monitor_updates.is_empty();
5881                 if !release_monitor {
5882                         self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate {
5883                                 update,
5884                         });
5885                         None
5886                 } else {
5887                         Some(update)
5888                 }
5889         }
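        // A minimal sketch of how this queue is drained via
        // unblock_next_blocked_monitor_update above; illustrative only, as the real
        // driver lives in ChannelManager:
        //
        //     while let Some((update, _more_pending)) = chan.unblock_next_blocked_monitor_update() {
        //         // Hand `update` off for persistence before releasing the next one.
        //     }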
5890
5891         pub fn blocked_monitor_updates_pending(&self) -> usize {
5892                 self.context.blocked_monitor_updates.len()
5893         }
5894
5895         /// Returns true if the channel is awaiting the persistence of the initial ChannelMonitor.
5896         /// If the channel is outbound, this implies we have not yet broadcasted the funding
5897         /// transaction. If the channel is inbound, this implies simply that the channel has not
5898         /// advanced state.
5899         pub fn is_awaiting_initial_mon_persist(&self) -> bool {
5900                 if !self.is_awaiting_monitor_update() { return false; }
5901                 if matches!(
5902                         self.context.channel_state, ChannelState::AwaitingChannelReady(flags)
5903                         if flags.clone().clear(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY | FundedStateFlags::PEER_DISCONNECTED | FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS | AwaitingChannelReadyFlags::WAITING_FOR_BATCH).is_empty()
5904                 ) {
5905                         // If we're not a 0conf channel, we'll be waiting on a monitor update with only
5906                         // AwaitingChannelReady set, though our peer could have sent their channel_ready.
5907                         debug_assert!(self.context.minimum_depth.unwrap_or(1) > 0);
5908                         return true;
5909                 }
5910                 if self.context.cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 &&
5911                         self.context.cur_counterparty_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 1 {
5912                         // If we're a 0-conf channel, we'll move beyond AwaitingChannelReady immediately even while
5913                         // waiting for the initial monitor persistence. Thus, we check if our commitment
5914                         // transaction numbers have both been iterated only exactly once (for the
5915                         // funding_signed), and we're awaiting monitor update.
5916                         //
5917                         // If we got here, we shouldn't have yet broadcasted the funding transaction (as the
5918                         // only way to get an awaiting-monitor-update state during initial funding is if the
5919                         // initial monitor persistence is still pending).
5920                         //
5921                         // Because deciding we're awaiting initial broadcast spuriously could result in
5922                         // funds-loss (as we don't have a monitor, but have the funding transaction confirmed),
5923                         // we hard-assert here, even in production builds.
5924                         if self.context.is_outbound() { assert!(self.context.funding_transaction.is_some()); }
5925                         assert!(self.context.monitor_pending_channel_ready);
5926                         assert_eq!(self.context.latest_monitor_update_id, 0);
5927                         return true;
5928                 }
5929                 false
5930         }
5931
5932         /// Returns true if our channel_ready has been sent.
5933         pub fn is_our_channel_ready(&self) -> bool {
5934                 matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(flags) if flags.is_set(AwaitingChannelReadyFlags::OUR_CHANNEL_READY)) ||
5935                         matches!(self.context.channel_state, ChannelState::ChannelReady(_))
5936         }
5937
5938         /// Returns true if our peer has either initiated or agreed to shut down the channel.
5939         pub fn received_shutdown(&self) -> bool {
5940                 self.context.channel_state.is_remote_shutdown_sent()
5941         }
5942
5943         /// Returns true if we either initiated or agreed to shut down the channel.
5944         pub fn sent_shutdown(&self) -> bool {
5945                 self.context.channel_state.is_local_shutdown_sent()
5946         }
5947
5948         /// Returns true if we initiated shutting down the channel.
5949         pub fn initiated_shutdown(&self) -> bool {
5950                 self.context.local_initiated_shutdown.is_some()
5951         }
5952
5953         /// Returns true if this channel is fully shut down. True here implies that no further actions
5954         /// may/will be taken on this channel, and thus this object should be freed. Any future changes
5955         /// will be handled appropriately by the chain monitor.
5956         pub fn is_shutdown(&self) -> bool {
5957                 matches!(self.context.channel_state, ChannelState::ShutdownComplete)
5958         }
5959
5960         pub fn channel_update_status(&self) -> ChannelUpdateStatus {
5961                 self.context.channel_update_status
5962         }
5963
5964         pub fn set_channel_update_status(&mut self, status: ChannelUpdateStatus) {
5965                 self.context.update_time_counter += 1;
5966                 self.context.channel_update_status = status;
5967         }
5968
5969         fn check_get_channel_ready(&mut self, height: u32) -> Option<msgs::ChannelReady> {
5970                 // Called:
5971                 //  * always when a new block/transactions are confirmed with the new height
5972                 //  * when funding is signed with a height of 0
5973                 if self.context.funding_tx_confirmation_height == 0 && self.context.minimum_depth != Some(0) {
5974                         return None;
5975                 }
5976
5977                 let funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
5978                 if funding_tx_confirmations <= 0 {
5979                         self.context.funding_tx_confirmation_height = 0;
5980                 }
5981
5982                 if funding_tx_confirmations < self.context.minimum_depth.unwrap_or(0) as i64 {
5983                         return None;
5984                 }
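                // For example: with funding_tx_confirmation_height = 100 and height = 102,
                // funding_tx_confirmations = 102 - 100 + 1 = 3, so a channel with
                // minimum_depth = 3 can become ready at height 102.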
5985
5986                 // If we're still pending the signature on a funding transaction, then we're not ready to send a
5987                 // channel_ready yet.
5988                 if self.context.signer_pending_funding {
5989                         return None;
5990                 }
5991
5992                 // Note that we don't include ChannelState::WaitingForBatch as we don't want to send
5993                 // channel_ready until the entire batch is ready.
5994                 let need_commitment_update = if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()).is_empty()) {
5995                         self.context.channel_state.set_our_channel_ready();
5996                         true
5997                 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::THEIR_CHANNEL_READY) {
5998                         self.context.channel_state = ChannelState::ChannelReady(self.context.channel_state.with_funded_state_flags_mask().into());
5999                         self.context.update_time_counter += 1;
6000                         true
6001                 } else if matches!(self.context.channel_state, ChannelState::AwaitingChannelReady(f) if f.clone().clear(FundedStateFlags::ALL.into()) == AwaitingChannelReadyFlags::OUR_CHANNEL_READY) {
6002                         // We got a reorg but not enough to trigger a force close, just ignore.
6003                         false
6004                 } else {
6005                         if self.context.funding_tx_confirmation_height != 0 &&
6006                                 self.context.channel_state < ChannelState::ChannelReady(ChannelReadyFlags::new())
6007                         {
6008                                 // We should never see a funding transaction on-chain until we've received
6009                                 // funding_signed (if we're an outbound channel), or seen funding_generated (if we're
6010                                 // an inbound channel - before that we have no known funding TXID). The fuzzer,
6011                                 // however, may do this and we shouldn't treat it as a bug.
6012                                 #[cfg(not(fuzzing))]
6013                                 panic!("Started confirming a channel in a state pre-AwaitingChannelReady: {}.\n\
6014                                         Do NOT broadcast a funding transaction manually - let LDK do it for you!",
6015                                         self.context.channel_state.to_u32());
6016                         }
6017                         // We got a reorg but not enough to trigger a force close, just ignore.
6018                         false
6019                 };
6020
6021                 if need_commitment_update {
6022                         if !self.context.channel_state.is_monitor_update_in_progress() {
6023                                 if !self.context.channel_state.is_peer_disconnected() {
6024                                         let next_per_commitment_point =
6025                                                 self.context.holder_signer.as_ref().get_per_commitment_point(INITIAL_COMMITMENT_NUMBER - 1, &self.context.secp_ctx);
6026                                         return Some(msgs::ChannelReady {
6027                                                 channel_id: self.context.channel_id,
6028                                                 next_per_commitment_point,
6029                                                 short_channel_id_alias: Some(self.context.outbound_scid_alias),
6030                                         });
6031                                 }
6032                         } else {
6033                                 self.context.monitor_pending_channel_ready = true;
6034                         }
6035                 }
6036                 None
6037         }
6038
6039         /// When a transaction is confirmed, we check whether it is or spends the funding transaction.
6040         /// In the first case, we store the confirmation height and calculate the short channel id.
6041         /// In the second, we simply return an Err indicating we need to be force-closed now.
6042         pub fn transactions_confirmed<NS: Deref, L: Deref>(
6043                 &mut self, block_hash: &BlockHash, height: u32, txdata: &TransactionData,
6044                 chain_hash: ChainHash, node_signer: &NS, user_config: &UserConfig, logger: &L
6045         ) -> Result<(Option<msgs::ChannelReady>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6046         where
6047                 NS::Target: NodeSigner,
6048                 L::Target: Logger
6049         {
6050                 let mut msgs = (None, None);
6051                 if let Some(funding_txo) = self.context.get_funding_txo() {
6052                         for &(index_in_block, tx) in txdata.iter() {
6053                                 // Check if the transaction is the expected funding transaction, and if it is,
6054                                 // check that it pays the right amount to the right script.
6055                                 if self.context.funding_tx_confirmation_height == 0 {
6056                                         if tx.txid() == funding_txo.txid {
6057                                                 let txo_idx = funding_txo.index as usize;
6058                                                 if txo_idx >= tx.output.len() || tx.output[txo_idx].script_pubkey != self.context.get_funding_redeemscript().to_v0_p2wsh() ||
6059                                                                 tx.output[txo_idx].value != self.context.channel_value_satoshis {
6060                                                         if self.context.is_outbound() {
6061                                                                 // If we generated the funding transaction and it doesn't match what it
6062                                                                 // should, the client is really broken and we should just panic and
6063                                                                 // tell them off. That said, because hash collisions happen with high
6064                                                                 // probability in fuzzing mode, if we're fuzzing we just close the
6065                                                                 // channel and move on.
6066                                                                 #[cfg(not(fuzzing))]
6067                                                                 panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6068                                                         }
6069                                                         self.context.update_time_counter += 1;
6070                                                         let err_reason = "funding tx had wrong script/value or output index";
6071                                                         return Err(ClosureReason::ProcessingError { err: err_reason.to_owned() });
6072                                                 } else {
6073                                                         if self.context.is_outbound() {
6074                                                                 if !tx.is_coin_base() {
6075                                                                         for input in tx.input.iter() {
6076                                                                                 if input.witness.is_empty() {
6077                                                                                         // We generated a malleable funding transaction, implying we've
6078                                                                                         // just exposed ourselves to funds loss to our counterparty.
6079                                                                                         #[cfg(not(fuzzing))]
6080                                                                                         panic!("Client called ChannelManager::funding_transaction_generated with bogus transaction!");
6081                                                                                 }
6082                                                                         }
6083                                                                 }
6084                                                         }
6085                                                         self.context.funding_tx_confirmation_height = height;
6086                                                         self.context.funding_tx_confirmed_in = Some(*block_hash);
6087                                                         self.context.short_channel_id = match scid_from_parts(height as u64, index_in_block as u64, txo_idx as u64) {
6088                                                                 Ok(scid) => Some(scid),
6089                                                                 Err(_) => panic!("Block was bogus - either height was > 16 million, had > 16 million transactions, or had > 65k outputs"),
6090                                                         }
6091                                                 }
6092                                                 // If this is a coinbase transaction and not a 0-conf channel,
6093                                                 // we should update our min_depth to 100 to handle coinbase maturity.
6094                                                 if tx.is_coin_base() &&
6095                                                         self.context.minimum_depth.unwrap_or(0) > 0 &&
6096                                                         self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
6097                                                         self.context.minimum_depth = Some(COINBASE_MATURITY);
6098                                                 }
6099                                         }
6100                                         // If we allow 1-conf funding, we may need to check for channel_ready here and
6101                                         // send it immediately instead of waiting for a best_block_updated call (which
6102                                         // may have already happened for this block).
6103                                         if let Some(channel_ready) = self.check_get_channel_ready(height) {
6104                                                 log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6105                                                 let announcement_sigs = self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger);
6106                                                 msgs = (Some(channel_ready), announcement_sigs);
6107                                         }
6108                                 }
6109                                 for inp in tx.input.iter() {
6110                                         if inp.previous_output == funding_txo.into_bitcoin_outpoint() {
6111                                                 log_info!(logger, "Detected channel-closing tx {} spending {}:{}, closing channel {}", tx.txid(), inp.previous_output.txid, inp.previous_output.vout, &self.context.channel_id());
6112                                                 return Err(ClosureReason::CommitmentTxConfirmed);
6113                                         }
6114                                 }
6115                         }
6116                 }
6117                 Ok(msgs)
6118         }
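        // For reference, scid_from_parts (used above) packs the BOLT 7 short channel ID
        // as (block_height << 40) | (index_in_block << 16) | output_index, which is why
        // it rejects heights over ~16 million, more than ~16 million transactions in a
        // block, or more than 65k outputs.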
6119
6120         /// When a new block is connected, we check the height of the block against outbound holding
6121         /// cell HTLCs in case we need to give up on them prematurely and time them out. Everything
6122         /// else (e.g. commitment transaction broadcasts, HTLC transaction broadcasting, etc) is
6123         /// handled by the ChannelMonitor.
6124         ///
6125         /// If we return Err, the channel may have been closed, at which point the standard
6126         /// requirements apply - no calls may be made except those explicitly stated to be allowed
6127         /// post-shutdown.
6128         ///
6129         /// May return some HTLCs (and their payment_hash) which have timed out and should be failed
6130         /// back.
6131         pub fn best_block_updated<NS: Deref, L: Deref>(
6132                 &mut self, height: u32, highest_header_time: u32, chain_hash: ChainHash,
6133                 node_signer: &NS, user_config: &UserConfig, logger: &L
6134         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6135         where
6136                 NS::Target: NodeSigner,
6137                 L::Target: Logger
6138         {
6139                 self.do_best_block_updated(height, highest_header_time, Some((chain_hash, node_signer, user_config)), logger)
6140         }
6141
6142         fn do_best_block_updated<NS: Deref, L: Deref>(
6143                 &mut self, height: u32, highest_header_time: u32,
6144                 chain_node_signer: Option<(ChainHash, &NS, &UserConfig)>, logger: &L
6145         ) -> Result<(Option<msgs::ChannelReady>, Vec<(HTLCSource, PaymentHash)>, Option<msgs::AnnouncementSignatures>), ClosureReason>
6146         where
6147                 NS::Target: NodeSigner,
6148                 L::Target: Logger
6149         {
6150                 let mut timed_out_htlcs = Vec::new();
6151                 // This mirrors the check in ChannelManager::decode_update_add_htlc_onion, refusing to
6152                 // forward an HTLC when our counterparty should almost certainly just fail it for expiring
6153                 // ~now.
6154                 let unforwarded_htlc_cltv_limit = height + LATENCY_GRACE_PERIOD_BLOCKS;
6155                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6156                         match htlc_update {
6157                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, ref cltv_expiry, .. } => {
6158                                         if *cltv_expiry <= unforwarded_htlc_cltv_limit {
6159                                                 timed_out_htlcs.push((source.clone(), payment_hash.clone()));
6160                                                 false
6161                                         } else { true }
6162                                 },
6163                                 _ => true
6164                         }
6165                 });
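                // For example: at height = 800_000, and assuming LATENCY_GRACE_PERIOD_BLOCKS
                // is 3, any holding-cell HTLC with cltv_expiry <= 800_003 is timed out and
                // failed back above rather than forwarded.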
6166
6167                 self.context.update_time_counter = cmp::max(self.context.update_time_counter, highest_header_time);
6168
6169                 if let Some(channel_ready) = self.check_get_channel_ready(height) {
6170                         let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6171                                 self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6172                         } else { None };
6173                         log_info!(logger, "Sending a channel_ready to our peer for channel {}", &self.context.channel_id);
6174                         return Ok((Some(channel_ready), timed_out_htlcs, announcement_sigs));
6175                 }
6176
6177                 if matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6178                         self.context.channel_state.is_our_channel_ready() {
6179                         let mut funding_tx_confirmations = height as i64 - self.context.funding_tx_confirmation_height as i64 + 1;
6180                         if self.context.funding_tx_confirmation_height == 0 {
6181                                 // Note that check_get_channel_ready may reset funding_tx_confirmation_height to
6182                                 // zero if it has been reorged out, however in either case, our state flags
6183                                 // indicate we've already sent a channel_ready
6184                                 funding_tx_confirmations = 0;
6185                         }
6186
6187                         // If we've sent channel_ready (or have both sent and received channel_ready), and
6188                         // the funding transaction has become unconfirmed,
6189                         // close the channel and hope we can get the latest state on chain (because presumably
6190                         // the funding transaction is at least still in the mempool of most nodes).
6191                         //
6192                         // Note that ideally we wouldn't force-close if we see *any* reorg on a 1-conf or
6193                         // 0-conf channel, but not doing so may lead to the
6194                         // `ChannelManager::short_to_chan_info` map being inconsistent, so we currently have
6195                         // to.
6196                         if funding_tx_confirmations == 0 && self.context.funding_tx_confirmed_in.is_some() {
6197                                 let err_reason = format!("Funding transaction was un-confirmed. Locked at {} confs, now have {} confs.",
6198                                         self.context.minimum_depth.unwrap(), funding_tx_confirmations);
6199                                 return Err(ClosureReason::ProcessingError { err: err_reason });
6200                         }
6201                 } else if !self.context.is_outbound() && self.context.funding_tx_confirmed_in.is_none() &&
6202                                 height >= self.context.channel_creation_height + FUNDING_CONF_DEADLINE_BLOCKS {
6203                         log_info!(logger, "Closing channel {} due to funding timeout", &self.context.channel_id);
6204                         // If funding_tx_confirmed_in is unset, the channel must not be active
6205                         assert!(self.context.channel_state <= ChannelState::ChannelReady(ChannelReadyFlags::new()));
6206                         assert!(!self.context.channel_state.is_our_channel_ready());
6207                         return Err(ClosureReason::FundingTimedOut);
6208                 }
6209
6210                 let announcement_sigs = if let Some((chain_hash, node_signer, user_config)) = chain_node_signer {
6211                         self.get_announcement_sigs(node_signer, chain_hash, user_config, height, logger)
6212                 } else { None };
6213                 Ok((None, timed_out_htlcs, announcement_sigs))
6214         }
6215
6216         /// Indicates the funding transaction is no longer confirmed in the main chain. This may
6217         /// force-close the channel, but may also indicate a harmless reorganization of a block or two
6218         /// before the channel has reached channel_ready and we can just wait for more blocks.
6219         pub fn funding_transaction_unconfirmed<L: Deref>(&mut self, logger: &L) -> Result<(), ClosureReason> where L::Target: Logger {
6220                 if self.context.funding_tx_confirmation_height != 0 {
6221                         // We handle the funding disconnection by calling best_block_updated with a height one
6222                         // below where our funding was connected, implying a reorg back to conf_height - 1.
6223                         let reorg_height = self.context.funding_tx_confirmation_height - 1;
6224                         // We use the time field to bump the current time we set on channel updates if it's
6225                         // larger. If we don't know that time has moved forward, we can just set it to the last
6226                         // time we saw and it will be ignored.
6227                         let best_time = self.context.update_time_counter;
6228                         match self.do_best_block_updated(reorg_height, best_time, None::<(ChainHash, &&dyn NodeSigner, &UserConfig)>, logger) {
6229                                 Ok((channel_ready, timed_out_htlcs, announcement_sigs)) => {
6230                                         assert!(channel_ready.is_none(), "We can't generate a channel_ready with 0 confirmations?");
6231                                         assert!(timed_out_htlcs.is_empty(), "We can't have accepted HTLCs with a timeout before our funding confirmation?");
6232                                         assert!(announcement_sigs.is_none(), "We can't generate an announcement_sigs with 0 confirmations?");
6233                                         Ok(())
6234                                 },
6235                                 Err(e) => Err(e)
6236                         }
6237                 } else {
6238                         // We never learned about the funding confirmation anyway, just ignore
6239                         Ok(())
6240                 }
6241         }
6242
6243         // Methods to get unprompted messages to send to the remote end (or where we already returned
6244         // something in the handler for the message that prompted this message):
6245
6246         /// Gets an UnsignedChannelAnnouncement for this channel. The channel must be publicly
6247         /// announceable and available for use (have exchanged [`ChannelReady`] messages in both
6248         /// directions). Should be used for both broadcasted announcements and in response to an
6249         /// AnnouncementSignatures message from the remote peer.
6250         ///
6251         /// Will only fail if we're not in a state where channel_announcement may be sent (including
6252         /// closing).
6253         ///
6254         /// This will only return ChannelError::Ignore upon failure.
6255         ///
6256         /// [`ChannelReady`]: crate::ln::msgs::ChannelReady
6257         fn get_channel_announcement<NS: Deref>(
6258                 &self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6259         ) -> Result<msgs::UnsignedChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6260                 if !self.context.config.announced_channel {
6261                         return Err(ChannelError::Ignore("Channel is not available for public announcements".to_owned()));
6262                 }
6263                 if !self.context.is_usable() {
6264                         return Err(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel is not currently usable".to_owned()));
6265                 }
6266
6267                 let short_channel_id = self.context.get_short_channel_id()
6268                         .ok_or(ChannelError::Ignore("Cannot get a ChannelAnnouncement if the channel has not been confirmed yet".to_owned()))?;
6269                 let node_id = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6270                         .map_err(|_| ChannelError::Ignore("Failed to retrieve own public key".to_owned()))?);
6271                 let counterparty_node_id = NodeId::from_pubkey(&self.context.get_counterparty_node_id());
6272                 let were_node_one = node_id.as_slice() < counterparty_node_id.as_slice();
6273
6274                 let msg = msgs::UnsignedChannelAnnouncement {
6275                         features: channelmanager::provided_channel_features(&user_config),
6276                         chain_hash,
6277                         short_channel_id,
6278                         node_id_1: if were_node_one { node_id } else { counterparty_node_id },
6279                         node_id_2: if were_node_one { counterparty_node_id } else { node_id },
6280                         bitcoin_key_1: NodeId::from_pubkey(if were_node_one { &self.context.get_holder_pubkeys().funding_pubkey } else { self.context.counterparty_funding_pubkey() }),
6281                         bitcoin_key_2: NodeId::from_pubkey(if were_node_one { self.context.counterparty_funding_pubkey() } else { &self.context.get_holder_pubkeys().funding_pubkey }),
6282                         excess_data: Vec::new(),
6283                 };
6284
6285                 Ok(msg)
6286         }
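        // Note on the ordering above (per BOLT 7): node_id_1/bitcoin_key_1 must belong to
        // the node whose serialized public key is lexicographically lesser, which is
        // exactly what the were_node_one byte-slice comparison computes (e.g. a key
        // starting 0x02... sorts before one starting 0x03...).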
6287
6288         fn get_announcement_sigs<NS: Deref, L: Deref>(
6289                 &mut self, node_signer: &NS, chain_hash: ChainHash, user_config: &UserConfig,
6290                 best_block_height: u32, logger: &L
6291         ) -> Option<msgs::AnnouncementSignatures>
6292         where
6293                 NS::Target: NodeSigner,
6294                 L::Target: Logger
6295         {
6296                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6297                         return None;
6298                 }
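                // For example: with funding_tx_confirmation_height = 100 we proceed only
                // once best_block_height >= 105, i.e. 105 - 100 + 1 = 6 confirmations,
                // matching the six confirmations BOLT 7 requires before announcing.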
6299
6300                 if !self.context.is_usable() {
6301                         return None;
6302                 }
6303
6304                 if self.context.channel_state.is_peer_disconnected() {
6305                         log_trace!(logger, "Cannot create an announcement_signatures as our peer is disconnected");
6306                         return None;
6307                 }
6308
6309                 if self.context.announcement_sigs_state != AnnouncementSigsState::NotSent {
6310                         return None;
6311                 }
6312
6313                 log_trace!(logger, "Creating an announcement_signatures message for channel {}", &self.context.channel_id());
6314                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6315                         Ok(a) => a,
6316                         Err(e) => {
6317                                 log_trace!(logger, "{:?}", e);
6318                                 return None;
6319                         }
6320                 };
6321                 let our_node_sig = match node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement)) {
6322                         Err(_) => {
6323                                 log_error!(logger, "Failed to generate node signature for channel_announcement. Channel will not be announced!");
6324                                 return None;
6325                         },
6326                         Ok(v) => v
6327                 };
6328                 match &self.context.holder_signer {
6329                         ChannelSignerType::Ecdsa(ecdsa) => {
6330                                 let our_bitcoin_sig = match ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx) {
6331                                         Err(_) => {
6332                                                 log_error!(logger, "Signer rejected channel_announcement signing. Channel will not be announced!");
6333                                                 return None;
6334                                         },
6335                                         Ok(v) => v
6336                                 };
6337                                 let short_channel_id = match self.context.get_short_channel_id() {
6338                                         Some(scid) => scid,
6339                                         None => return None,
6340                                 };
6341
6342                                 self.context.announcement_sigs_state = AnnouncementSigsState::MessageSent;
6343
6344                                 Some(msgs::AnnouncementSignatures {
6345                                         channel_id: self.context.channel_id(),
6346                                         short_channel_id,
6347                                         node_signature: our_node_sig,
6348                                         bitcoin_signature: our_bitcoin_sig,
6349                                 })
6350                         },
6351                         // TODO (taproot|arik)
6352                         #[cfg(taproot)]
6353                         _ => todo!()
6354                 }
6355         }
6356
6357         /// Signs the given channel announcement, returning a ChannelError::Ignore if no keys are
6358         /// available.
6359         fn sign_channel_announcement<NS: Deref>(
6360                 &self, node_signer: &NS, announcement: msgs::UnsignedChannelAnnouncement
6361         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6362                 if let Some((their_node_sig, their_bitcoin_sig)) = self.context.announcement_sigs {
6363                         let our_node_key = NodeId::from_pubkey(&node_signer.get_node_id(Recipient::Node)
6364                                 .map_err(|_| ChannelError::Ignore("Signer failed to retrieve own public key".to_owned()))?);
6365                         let were_node_one = announcement.node_id_1 == our_node_key;
6366
6367                         let our_node_sig = node_signer.sign_gossip_message(msgs::UnsignedGossipMessage::ChannelAnnouncement(&announcement))
6368                                 .map_err(|_| ChannelError::Ignore("Failed to generate node signature for channel_announcement".to_owned()))?;
6369                         match &self.context.holder_signer {
6370                                 ChannelSignerType::Ecdsa(ecdsa) => {
6371                                         let our_bitcoin_sig = ecdsa.sign_channel_announcement_with_funding_key(&announcement, &self.context.secp_ctx)
6372                                                 .map_err(|_| ChannelError::Ignore("Signer rejected channel_announcement".to_owned()))?;
6373                                         Ok(msgs::ChannelAnnouncement {
6374                                                 node_signature_1: if were_node_one { our_node_sig } else { their_node_sig },
6375                                                 node_signature_2: if were_node_one { their_node_sig } else { our_node_sig },
6376                                                 bitcoin_signature_1: if were_node_one { our_bitcoin_sig } else { their_bitcoin_sig },
6377                                                 bitcoin_signature_2: if were_node_one { their_bitcoin_sig } else { our_bitcoin_sig },
6378                                                 contents: announcement,
6379                                         })
6380                                 },
6381                                 // TODO (taproot|arik)
6382                                 #[cfg(taproot)]
6383                                 _ => todo!()
6384                         }
6385                 } else {
6386                         Err(ChannelError::Ignore("Attempted to sign channel announcement before we'd received announcement_signatures".to_string()))
6387                 }
6388         }
6389
6390         /// Processes an incoming announcement_signatures message, providing a fully-signed
6391         /// channel_announcement message which we can broadcast and storing our counterparty's
6392         /// signatures for later reconstruction/rebroadcast of the channel_announcement.
6393         pub fn announcement_signatures<NS: Deref>(
6394                 &mut self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32,
6395                 msg: &msgs::AnnouncementSignatures, user_config: &UserConfig
6396         ) -> Result<msgs::ChannelAnnouncement, ChannelError> where NS::Target: NodeSigner {
6397                 let announcement = self.get_channel_announcement(node_signer, chain_hash, user_config)?;
6398
6399                 let msghash = hash_to_message!(&Sha256d::hash(&announcement.encode()[..])[..]);
6400
6401                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.node_signature, &self.context.get_counterparty_node_id()).is_err() {
6402                         return Err(ChannelError::Close(format!(
6403                                 "Bad announcement_signatures. Failed to verify node_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_node_key is {:?}",
6404                                  &announcement, self.context.get_counterparty_node_id())));
6405                 }
6406                 if self.context.secp_ctx.verify_ecdsa(&msghash, &msg.bitcoin_signature, self.context.counterparty_funding_pubkey()).is_err() {
6407                         return Err(ChannelError::Close(format!(
6408                                 "Bad announcement_signatures. Failed to verify bitcoin_signature. UnsignedChannelAnnouncement used for verification is {:?}. their_bitcoin_key is ({:?})",
6409                                 &announcement, self.context.counterparty_funding_pubkey())));
6410                 }
6411
6412                 self.context.announcement_sigs = Some((msg.node_signature, msg.bitcoin_signature));
6413                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6414                         return Err(ChannelError::Ignore(
6415                                 "Got announcement_signatures prior to the required six confirmations - we may not have received a block yet that our peer has".to_owned()));
6416                 }
6417
6418                 self.sign_channel_announcement(node_signer, announcement)
6419         }
6420
6421         /// Gets a signed channel_announcement for this channel, if we previously received an
6422         /// announcement_signatures from our counterparty.
6423         pub fn get_signed_channel_announcement<NS: Deref>(
6424                 &self, node_signer: &NS, chain_hash: ChainHash, best_block_height: u32, user_config: &UserConfig
6425         ) -> Option<msgs::ChannelAnnouncement> where NS::Target: NodeSigner {
6426                 if self.context.funding_tx_confirmation_height == 0 || self.context.funding_tx_confirmation_height + 5 > best_block_height {
6427                         return None;
6428                 }
6429                 let announcement = match self.get_channel_announcement(node_signer, chain_hash, user_config) {
6430                         Ok(res) => res,
6431                         Err(_) => return None,
6432                 };
6433                 match self.sign_channel_announcement(node_signer, announcement) {
6434                         Ok(res) => Some(res),
6435                         Err(_) => None,
6436                 }
6437         }
6438
6439         /// May panic if called on a channel that wasn't immediately-previously
6440         /// self.remove_uncommitted_htlcs_and_mark_paused()'d
6441         pub fn get_channel_reestablish<L: Deref>(&mut self, logger: &L) -> msgs::ChannelReestablish where L::Target: Logger {
6442                 assert!(self.context.channel_state.is_peer_disconnected());
6443                 assert_ne!(self.context.cur_counterparty_commitment_transaction_number, INITIAL_COMMITMENT_NUMBER);
6444                 // Prior to static_remotekey, my_current_per_commitment_point was critical to claiming
6445                 // current to_remote balances. However, it no longer has any use, and thus is now simply
6446                 // set to a dummy (but valid, as required by the spec) public key.
6447                 // Fuzzing mode marks a subset of pubkeys as invalid so that we can hit "invalid pubkey"
6448                 // branches; since we unwrap the result below, we arbitrarily select a dummy pubkey which is
6449                 // both valid, and valid under fuzzing mode's arbitrary validity criteria:
6450                 let mut pk = [2; 33]; pk[1] = 0xff;
6451                 let dummy_pubkey = PublicKey::from_slice(&pk).unwrap();
6452                 let remote_last_secret = if self.context.cur_counterparty_commitment_transaction_number + 1 < INITIAL_COMMITMENT_NUMBER {
6453                         let remote_last_secret = self.context.commitment_secrets.get_secret(self.context.cur_counterparty_commitment_transaction_number + 2).unwrap();
6454                         log_trace!(logger, "Enough info to generate a Data Loss Protect with per_commitment_secret {} for channel {}", log_bytes!(remote_last_secret), &self.context.channel_id());
6455                         remote_last_secret
6456                 } else {
6457                         log_info!(logger, "Sending a data_loss_protect with no previous remote per_commitment_secret for channel {}", &self.context.channel_id());
6458                         [0;32]
6459                 };
6460                 self.mark_awaiting_response();
6461                 msgs::ChannelReestablish {
6462                         channel_id: self.context.channel_id(),
6463                         // The protocol has two different commitment number concepts - the "commitment
6464                         // transaction number", which starts from 0 and counts up, and the "revocation key
6465                         // index" which starts at INITIAL_COMMITMENT_NUMBER and counts down. We track
6466                         // commitment transaction numbers by the index which will be used to reveal the
6467                         // revocation key for that commitment transaction, which means we have to convert them
6468                         // to protocol-level commitment numbers here...
6469
6470                         // next_local_commitment_number is the next commitment_signed number we expect to
6471                         // receive (indicating if they need to resend one that we missed).
6472                         next_local_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_holder_commitment_transaction_number,
6473                         // We have to set next_remote_commitment_number to the next revoke_and_ack we expect to
6474                         // receive, however we track it by the next commitment number for a remote transaction
6475                         // (which is one further, as they always revoke previous commitment transaction, not
6476                         // the one we send) so we have to decrement by 1. Note that if
6477                         // cur_counterparty_commitment_transaction_number is INITIAL_COMMITMENT_NUMBER we will have
6478                         // dropped this channel on disconnect as it hasn't yet reached AwaitingChannelReady so we can't
6479                         // overflow here.
6480                         next_remote_commitment_number: INITIAL_COMMITMENT_NUMBER - self.context.cur_counterparty_commitment_transaction_number - 1,
6481                         your_last_per_commitment_secret: remote_last_secret,
6482                         my_current_per_commitment_point: dummy_pubkey,
6483                         // TODO(dual_funding): If we've sent `commitment_signed` for an interactive transaction
6484                         // construction but have not received `tx_signatures` we MUST set `next_funding_txid` to the
6485                         // txid of that interactive transaction, else we MUST NOT set it.
6486                         next_funding_txid: None,
6487                 }
6488         }
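        // A worked example of the number conversion above: INITIAL_COMMITMENT_NUMBER is
        // 2^48 - 1 and our internal counters count *down* from it, so with
        // cur_holder_commitment_transaction_number == INITIAL_COMMITMENT_NUMBER - 2 we
        // send next_local_commitment_number = 2 (protocol-level numbers count up from 0).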
6489
6490
6491         // Send stuff to our remote peers:
6492
6493         /// Queues up an outbound HTLC to send by placing it in the holding cell. You should call
6494         /// [`Self::maybe_free_holding_cell_htlcs`] in order to actually generate and send the
6495         /// commitment update.
6496         ///
6497         /// `Err`s will only be [`ChannelError::Ignore`].
6498         pub fn queue_add_htlc<F: Deref, L: Deref>(
6499                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6500                 onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6501                 blinding_point: Option<PublicKey>, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6502         ) -> Result<(), ChannelError>
6503         where F::Target: FeeEstimator, L::Target: Logger
6504         {
6505                 self
6506                         .send_htlc(amount_msat, payment_hash, cltv_expiry, source, onion_routing_packet, true,
6507                                 skimmed_fee_msat, blinding_point, fee_estimator, logger)
6508                         .map(|msg_opt| assert!(msg_opt.is_none(), "We forced holding cell?"))
6509                         .map_err(|err| {
6510                                 if let ChannelError::Ignore(_) = err { /* fine */ }
6511                                 else { debug_assert!(false, "Queueing cannot trigger channel failure"); }
6512                                 err
6513                         })
6514         }
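        // An illustrative call sequence (not a verbatim snippet; see
        // maybe_free_holding_cell_htlcs for the real draining logic):
        //
        //     chan.queue_add_htlc(amt_msat, payment_hash, cltv_expiry, source,
        //             onion_packet, None, None, &fee_estimator, &logger)?;
        //     // Later, once the holding cell is freed, the update_add_htlc and
        //     // commitment_signed actually go to the peer.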
6515
6516         /// Adds a pending outbound HTLC to this channel. Note that you probably want
6517         /// [`Self::send_htlc_and_commit`] instead, as you'll usually want both messages at once.
6518         ///
6519         /// This returns an optional UpdateAddHTLC as we may be in a state where we cannot add HTLCs on
6520         /// the wire:
6521         /// * In cases where we're waiting on the remote peer to send us a revoke_and_ack, we
6522         ///   wouldn't be able to determine what they actually ACK'ed if we have two sets of updates
6523         ///   awaiting ACK.
6524         /// * In cases where we're marked MonitorUpdateInProgress, we cannot commit to a new state as
6525         ///   we may not yet have sent the previous commitment update messages and will need to
6526         ///   regenerate them.
6527         ///
6528         /// You MUST call [`Self::send_commitment_no_state_update`] prior to calling any other methods
6529         /// on this [`Channel`] if `force_holding_cell` is false.
6530         ///
6531         /// `Err`s will only be [`ChannelError::Ignore`].
6532         fn send_htlc<F: Deref, L: Deref>(
6533                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32, source: HTLCSource,
6534                 onion_routing_packet: msgs::OnionPacket, mut force_holding_cell: bool,
6535                 skimmed_fee_msat: Option<u64>, blinding_point: Option<PublicKey>,
6536                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6537         ) -> Result<Option<msgs::UpdateAddHTLC>, ChannelError>
6538         where F::Target: FeeEstimator, L::Target: Logger
6539         {
6540                 if !matches!(self.context.channel_state, ChannelState::ChannelReady(_)) ||
6541                         self.context.channel_state.is_local_shutdown_sent() ||
6542                         self.context.channel_state.is_remote_shutdown_sent()
6543                 {
6544                         return Err(ChannelError::Ignore("Cannot send HTLC until channel is fully established and we haven't started shutting down".to_owned()));
6545                 }
6546                 let channel_total_msat = self.context.channel_value_satoshis * 1000;
6547                 if amount_msat > channel_total_msat {
6548                         return Err(ChannelError::Ignore(format!("Cannot send amount {}, because it is more than the total value of the channel {}", amount_msat, channel_total_msat)));
6549                 }
6550
6551                 if amount_msat == 0 {
6552                         return Err(ChannelError::Ignore("Cannot send 0-msat HTLC".to_owned()));
6553                 }
6554
6555                 let available_balances = self.context.get_available_balances(fee_estimator);
6556                 if amount_msat < available_balances.next_outbound_htlc_minimum_msat {
6557                         return Err(ChannelError::Ignore(format!("Cannot send less than our next-HTLC minimum - {} msat",
6558                                 available_balances.next_outbound_htlc_minimum_msat)));
6559                 }
6560
6561                 if amount_msat > available_balances.next_outbound_htlc_limit_msat {
6562                         return Err(ChannelError::Ignore(format!("Cannot send more than our next-HTLC maximum - {} msat",
6563                                 available_balances.next_outbound_htlc_limit_msat)));
6564                 }
6565
6566                 if self.context.channel_state.is_peer_disconnected() {
6567                         // Note that this should never really happen: if we're !is_live() on receipt of an
6568                         // incoming HTLC for relay we will reject the HTLC, and we won't allow
6569                         // the user to send directly into a !is_live() channel. However, if we
6570                         // disconnected during the time the previous hop was doing the commitment dance we may
6571                         // end up getting here after the forwarding delay. In any case, returning an
6572                         // IgnoreError will get ChannelManager to do the right thing and fail backwards now.
6573                         return Err(ChannelError::Ignore("Cannot send an HTLC while disconnected from channel counterparty".to_owned()));
6574                 }
6575
6576                 let need_holding_cell = !self.context.channel_state.can_generate_new_commitment();
6577                 log_debug!(logger, "Pushing new outbound HTLC with hash {} for {} msat {}",
6578                         payment_hash, amount_msat,
6579                         if force_holding_cell { "into holding cell" }
6580                         else if need_holding_cell { "into holding cell as we're awaiting an RAA or monitor" }
6581                         else { "to peer" });
6582
6583                 if need_holding_cell {
6584                         force_holding_cell = true;
6585                 }
6586
6587                 // Now update local state:
6588                 if force_holding_cell {
6589                         self.context.holding_cell_htlc_updates.push(HTLCUpdateAwaitingACK::AddHTLC {
6590                                 amount_msat,
6591                                 payment_hash,
6592                                 cltv_expiry,
6593                                 source,
6594                                 onion_routing_packet,
6595                                 skimmed_fee_msat,
6596                                 blinding_point,
6597                         });
6598                         return Ok(None);
6599                 }
6600
6601                 self.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
6602                         htlc_id: self.context.next_holder_htlc_id,
6603                         amount_msat,
6604                         payment_hash: payment_hash.clone(),
6605                         cltv_expiry,
6606                         state: OutboundHTLCState::LocalAnnounced(Box::new(onion_routing_packet.clone())),
6607                         source,
6608                         blinding_point,
6609                         skimmed_fee_msat,
6610                 });
6611
6612                 let res = msgs::UpdateAddHTLC {
6613                         channel_id: self.context.channel_id,
6614                         htlc_id: self.context.next_holder_htlc_id,
6615                         amount_msat,
6616                         payment_hash,
6617                         cltv_expiry,
6618                         onion_routing_packet,
6619                         skimmed_fee_msat,
6620                         blinding_point,
6621                 };
6622                 self.context.next_holder_htlc_id += 1;
6623
6624                 Ok(Some(res))
6625         }
6626
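        /// Promotes any HTLC and fee-update state that was waiting on a new commitment, builds
        /// the next counterparty commitment transaction, and returns the `ChannelMonitorUpdate`
        /// recording it. Unlike [`Self::build_commitment_no_state_update`], this mutates channel
        /// state and leaves us awaiting the counterparty's `revoke_and_ack`.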
6627         fn build_commitment_no_status_check<L: Deref>(&mut self, logger: &L) -> ChannelMonitorUpdate where L::Target: Logger {
6628                 log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed...");
6629                 // We can upgrade the status of some HTLCs that are waiting on a commitment: even if we
6630                 // fail to generate this commitment, we are still at a point where upgrading their status
6631                 // is acceptable.
6632                 for htlc in self.context.pending_inbound_htlcs.iter_mut() {
6633                         let new_state = if let &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref forward_info) = &htlc.state {
6634                                 Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone()))
6635                         } else { None };
6636                         if let Some(state) = new_state {
6637                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", &htlc.payment_hash);
6638                                 htlc.state = state;
6639                         }
6640                 }
6641                 for htlc in self.context.pending_outbound_htlcs.iter_mut() {
6642                         if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut outcome) = &mut htlc.state {
6643                                 log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", &htlc.payment_hash);
6644                                 // Grab the preimage, if it exists, instead of cloning
6645                                 let mut reason = OutboundHTLCOutcome::Success(None);
6646                                 mem::swap(outcome, &mut reason);
6647                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(reason);
6648                         }
6649                 }
6650                 if let Some((feerate, update_state)) = self.context.pending_update_fee {
6651                         if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce {
6652                                 debug_assert!(!self.context.is_outbound());
6653                                 log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate);
6654                                 self.context.feerate_per_kw = feerate;
6655                                 self.context.pending_update_fee = None;
6656                         }
6657                 }
6658                 self.context.resend_order = RAACommitmentOrder::RevokeAndACKFirst;
6659
6660                 let (mut htlcs_ref, counterparty_commitment_tx) =
6661                         self.build_commitment_no_state_update(logger);
6662                 let counterparty_commitment_txid = counterparty_commitment_tx.trust().txid();
6663                 let htlcs: Vec<(HTLCOutputInCommitment, Option<Box<HTLCSource>>)> =
6664                         htlcs_ref.drain(..).map(|(htlc, htlc_source)| (htlc, htlc_source.map(|source_ref| Box::new(source_ref.clone())))).collect();
6665
6666                 if self.context.announcement_sigs_state == AnnouncementSigsState::MessageSent {
6667                         self.context.announcement_sigs_state = AnnouncementSigsState::Committed;
6668                 }
6669
6670                 self.context.latest_monitor_update_id += 1;
6671                 let monitor_update = ChannelMonitorUpdate {
6672                         update_id: self.context.latest_monitor_update_id,
6673                         counterparty_node_id: Some(self.context.counterparty_node_id),
6674                         updates: vec![ChannelMonitorUpdateStep::LatestCounterpartyCommitmentTXInfo {
6675                                 commitment_txid: counterparty_commitment_txid,
6676                                 htlc_outputs: htlcs.clone(),
6677                                 commitment_number: self.context.cur_counterparty_commitment_transaction_number,
6678                                 their_per_commitment_point: self.context.counterparty_cur_commitment_point.unwrap(),
6679                                 feerate_per_kw: Some(counterparty_commitment_tx.feerate_per_kw()),
6680                                 to_broadcaster_value_sat: Some(counterparty_commitment_tx.to_broadcaster_value_sat()),
6681                                 to_countersignatory_value_sat: Some(counterparty_commitment_tx.to_countersignatory_value_sat()),
6682                         }],
6683                         channel_id: Some(self.context.channel_id()),
6684                 };
6685                 self.context.channel_state.set_awaiting_remote_revoke();
6686                 monitor_update
6687         }
6688
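        /// Builds the next counterparty commitment transaction and the HTLCs it includes without
        /// changing any channel state. In test/fuzzing builds, also cross-checks the resulting
        /// commitment fee against the most recently cached fee projection.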
6689         fn build_commitment_no_state_update<L: Deref>(&self, logger: &L)
6690         -> (Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>, CommitmentTransaction)
6691         where L::Target: Logger
6692         {
6693                 let counterparty_keys = self.context.build_remote_transaction_keys();
6694                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6695                 let counterparty_commitment_tx = commitment_stats.tx;
6696
6697                 #[cfg(any(test, fuzzing))]
6698                 {
6699                         if !self.context.is_outbound() {
6700                                 let projected_commit_tx_info = self.context.next_remote_commitment_tx_fee_info_cached.lock().unwrap().take();
6701                                 *self.context.next_local_commitment_tx_fee_info_cached.lock().unwrap() = None;
6702                                 if let Some(info) = projected_commit_tx_info {
6703                                         let total_pending_htlcs = self.context.pending_inbound_htlcs.len() + self.context.pending_outbound_htlcs.len();
6704                                         if info.total_pending_htlcs == total_pending_htlcs
6705                                                 && info.next_holder_htlc_id == self.context.next_holder_htlc_id
6706                                                 && info.next_counterparty_htlc_id == self.context.next_counterparty_htlc_id
6707                                                 && info.feerate == self.context.feerate_per_kw {
6708                                                         let actual_fee = commit_tx_fee_msat(self.context.feerate_per_kw, commitment_stats.num_nondust_htlcs, self.context.get_channel_type());
6709                                                         assert_eq!(actual_fee, info.fee);
6710                                                 }
6711                                 }
6712                         }
6713                 }
6714
6715                 (commitment_stats.htlcs_included, counterparty_commitment_tx)
6716         }
6717
6718         /// Only fails in case of signer rejection. Used for channel_reestablish commitment_signed
6719         /// generation when we shouldn't change HTLC/channel state.
6720         fn send_commitment_no_state_update<L: Deref>(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger {
6721                 // Run the test/fuzzing-only fee-consistency checks in `build_commitment_no_state_update`
6722                 #[cfg(any(test, fuzzing))]
6723                 self.build_commitment_no_state_update(logger);
6724
6725                 let counterparty_keys = self.context.build_remote_transaction_keys();
6726                 let commitment_stats = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger);
6727                 let counterparty_commitment_txid = commitment_stats.tx.trust().txid();
6728
6729                 match &self.context.holder_signer {
6730                         ChannelSignerType::Ecdsa(ecdsa) => {
6731                                 let (signature, htlc_signatures);
6732
6733                                 {
6734                                         let mut htlcs = Vec::with_capacity(commitment_stats.htlcs_included.len());
6735                                         for &(ref htlc, _) in commitment_stats.htlcs_included.iter() {
6736                                                 htlcs.push(htlc);
6737                                         }
6738
6739                                         let res = ecdsa.sign_counterparty_commitment(
6740                                                         &commitment_stats.tx,
6741                                                         commitment_stats.inbound_htlc_preimages,
6742                                                         commitment_stats.outbound_htlc_preimages,
6743                                                         &self.context.secp_ctx,
6744                                                 ).map_err(|_| ChannelError::Ignore("Failed to get signatures for new commitment_signed".to_owned()))?;
6745                                         signature = res.0;
6746                                         htlc_signatures = res.1;
6747
6748                                         log_trace!(logger, "Signed remote commitment tx {} (txid {}) with redeemscript {} -> {} in channel {}",
6749                                                 encode::serialize_hex(&commitment_stats.tx.trust().built_transaction().transaction),
6750                                                 &counterparty_commitment_txid, encode::serialize_hex(&self.context.get_funding_redeemscript()),
6751                                                 log_bytes!(signature.serialize_compact()[..]), &self.context.channel_id());
6752
6753                                         for (ref htlc_sig, ref htlc) in htlc_signatures.iter().zip(htlcs) {
6754                                                 log_trace!(logger, "Signed remote HTLC tx {} with redeemscript {} with pubkey {} -> {} in channel {}",
6755                                                         encode::serialize_hex(&chan_utils::build_htlc_transaction(&counterparty_commitment_txid, commitment_stats.feerate_per_kw, self.context.get_holder_selected_contest_delay(), htlc, &self.context.channel_type, &counterparty_keys.broadcaster_delayed_payment_key, &counterparty_keys.revocation_key)),
6756                                                         encode::serialize_hex(&chan_utils::get_htlc_redeemscript(&htlc, &self.context.channel_type, &counterparty_keys)),
6757                                                         log_bytes!(counterparty_keys.broadcaster_htlc_key.to_public_key().serialize()),
6758                                                         log_bytes!(htlc_sig.serialize_compact()[..]), &self.context.channel_id());
6759                                         }
6760                                 }
6761
6762                                 Ok((msgs::CommitmentSigned {
6763                                         channel_id: self.context.channel_id,
6764                                         signature,
6765                                         htlc_signatures,
6766                                         #[cfg(taproot)]
6767                                         partial_signature_with_nonce: None,
6768                                 }, (counterparty_commitment_txid, commitment_stats.htlcs_included)))
6769                         },
6770                         // TODO (taproot|arik)
6771                         #[cfg(taproot)]
6772                         _ => todo!()
6773                 }
6774         }
6775
6776         /// Adds a pending outbound HTLC to this channel, and builds a new remote commitment
6777         /// transaction and generates the corresponding [`ChannelMonitorUpdate`] in one go.
6778         ///
6779         /// Shorthand for calling [`Self::send_htlc`] followed by a commitment update, see docs on
6780         /// [`Self::send_htlc`] and [`Self::build_commitment_no_state_update`] for more info.
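        ///
        /// A rough usage sketch (hypothetical values; `chan`, `fee_estimator`, and `logger` are
        /// assumed to be in scope, the channel ready, and the peer connected):
        ///
        /// ```ignore
        /// let monitor_update_opt = chan.send_htlc_and_commit(
        ///     10_000, payment_hash, cltv_expiry, source, onion_packet,
        ///     None, // no skimmed fee for this HTLC
        ///     &fee_estimator, &logger,
        /// )?;
        /// ```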
6781         pub fn send_htlc_and_commit<F: Deref, L: Deref>(
6782                 &mut self, amount_msat: u64, payment_hash: PaymentHash, cltv_expiry: u32,
6783                 source: HTLCSource, onion_routing_packet: msgs::OnionPacket, skimmed_fee_msat: Option<u64>,
6784                 fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L
6785         ) -> Result<Option<ChannelMonitorUpdate>, ChannelError>
6786         where F::Target: FeeEstimator, L::Target: Logger
6787         {
6788                 let send_res = self.send_htlc(amount_msat, payment_hash, cltv_expiry, source,
6789                         onion_routing_packet, false, skimmed_fee_msat, None, fee_estimator, logger);
6790                 if let Err(e) = &send_res { if let ChannelError::Ignore(_) = e {} else { debug_assert!(false, "Sending cannot trigger channel failure"); } }
6791                 match send_res? {
6792                         Some(_) => {
6793                                 let monitor_update = self.build_commitment_no_status_check(logger);
6794                                 self.monitor_updating_paused(false, true, false, Vec::new(), Vec::new(), Vec::new());
6795                                 Ok(self.push_ret_blockable_mon_update(monitor_update))
6796                         },
6797                         None => Ok(None)
6798                 }
6799         }
6800
6801         /// Applies the `ChannelUpdate` and returns a boolean indicating whether a change actually
6802         /// happened.
6803         pub fn channel_update(&mut self, msg: &msgs::ChannelUpdate) -> Result<bool, ChannelError> {
6804                 let new_forwarding_info = Some(CounterpartyForwardingInfo {
6805                         fee_base_msat: msg.contents.fee_base_msat,
6806                         fee_proportional_millionths: msg.contents.fee_proportional_millionths,
6807                         cltv_expiry_delta: msg.contents.cltv_expiry_delta
6808                 });
6809                 let did_change = self.context.counterparty_forwarding_info != new_forwarding_info;
6810                 if did_change {
6811                         self.context.counterparty_forwarding_info = new_forwarding_info;
6812                 }
6813
6814                 Ok(did_change)
6815         }
6816
6817         /// Begins the shutdown process, getting a message for the remote peer and returning all
6818         /// holding cell HTLCs for payment failure.
6819         pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures,
6820                 target_feerate_sats_per_kw: Option<u32>, override_shutdown_script: Option<ShutdownScript>)
6821         -> Result<(msgs::Shutdown, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), APIError>
6822         {
6823                 for htlc in self.context.pending_outbound_htlcs.iter() {
6824                         if let OutboundHTLCState::LocalAnnounced(_) = htlc.state {
6825                                 return Err(APIError::APIMisuseError{err: "Cannot begin shutdown with pending HTLCs. Process pending events first".to_owned()});
6826                         }
6827                 }
6828                 if self.context.channel_state.is_local_shutdown_sent() {
6829                         return Err(APIError::APIMisuseError{err: "Shutdown already in progress".to_owned()});
6830                 }
6831                 else if self.context.channel_state.is_remote_shutdown_sent() {
6832                         return Err(APIError::ChannelUnavailable{err: "Shutdown already in progress by remote".to_owned()});
6833                 }
6834                 if self.context.shutdown_scriptpubkey.is_some() && override_shutdown_script.is_some() {
6835                         return Err(APIError::APIMisuseError{err: "Cannot override shutdown script for a channel with one already set".to_owned()});
6836                 }
6837                 assert!(!matches!(self.context.channel_state, ChannelState::ShutdownComplete));
6838                 if self.context.channel_state.is_peer_disconnected() || self.context.channel_state.is_monitor_update_in_progress() {
6839                         return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()});
6840                 }
6841
6842                 let update_shutdown_script = match self.context.shutdown_scriptpubkey {
6843                         Some(_) => false,
6844                         None => {
6845                                 // use override shutdown script if provided
6846                                 let shutdown_scriptpubkey = match override_shutdown_script {
6847                                         Some(script) => script,
6848                                         None => {
6849                                                 // otherwise, use the shutdown scriptpubkey provided by the signer
6850                                                 match signer_provider.get_shutdown_scriptpubkey() {
6851                                                         Ok(scriptpubkey) => scriptpubkey,
6852                                                         Err(_) => return Err(APIError::ChannelUnavailable{err: "Failed to get shutdown scriptpubkey".to_owned()}),
6853                                                 }
6854                                         },
6855                                 };
6856                                 if !shutdown_scriptpubkey.is_compatible(their_features) {
6857                                         return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6858                                 }
6859                                 self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey);
6860                                 true
6861                         },
6862                 };
6863
6864                 // From here on out, we may not fail!
6865                 self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw;
6866                 self.context.channel_state.set_local_shutdown_sent();
6867                 self.context.local_initiated_shutdown = Some(());
6868                 self.context.update_time_counter += 1;
6869
6870                 let monitor_update = if update_shutdown_script {
6871                         self.context.latest_monitor_update_id += 1;
6872                         let monitor_update = ChannelMonitorUpdate {
6873                                 update_id: self.context.latest_monitor_update_id,
6874                                 counterparty_node_id: Some(self.context.counterparty_node_id),
6875                                 updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
6876                                         scriptpubkey: self.get_closing_scriptpubkey(),
6877                                 }],
6878                                 channel_id: Some(self.context.channel_id()),
6879                         };
6880                         self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
6881                         self.push_ret_blockable_mon_update(monitor_update)
6882                 } else { None };
6883                 let shutdown = msgs::Shutdown {
6884                         channel_id: self.context.channel_id,
6885                         scriptpubkey: self.get_closing_scriptpubkey(),
6886                 };
6887
6888                 // Go ahead and drop holding cell updates as we'd rather fail payments than wait to send
6889                 // our shutdown until we've committed all of the pending changes.
6890                 self.context.holding_cell_update_fee = None;
6891                 let mut dropped_outbound_htlcs = Vec::with_capacity(self.context.holding_cell_htlc_updates.len());
6892                 self.context.holding_cell_htlc_updates.retain(|htlc_update| {
6893                         match htlc_update {
6894                                 &HTLCUpdateAwaitingACK::AddHTLC { ref payment_hash, ref source, .. } => {
6895                                         dropped_outbound_htlcs.push((source.clone(), payment_hash.clone()));
6896                                         false
6897                                 },
6898                                 _ => true
6899                         }
6900                 });
6901
6902                 debug_assert!(!self.is_shutdown() || monitor_update.is_none(),
6903                         "we can't both complete shutdown and return a monitor update");
6904
6905                 Ok((shutdown, monitor_update, dropped_outbound_htlcs))
6906         }
6907
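        /// Returns the source and payment hash of every outbound HTLC currently in flight on
        /// this channel, including those still waiting in the holding cell.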
6908         pub fn inflight_htlc_sources(&self) -> impl Iterator<Item=(&HTLCSource, &PaymentHash)> {
6909                 self.context.holding_cell_htlc_updates.iter()
6910                         .flat_map(|htlc_update| {
6911                                 match htlc_update {
6912                                         HTLCUpdateAwaitingACK::AddHTLC { source, payment_hash, .. }
6913                                                 => Some((source, payment_hash)),
6914                                         _ => None,
6915                                 }
6916                         })
6917                         .chain(self.context.pending_outbound_htlcs.iter().map(|htlc| (&htlc.source, &htlc.payment_hash)))
6918         }
6919 }
6920
6921 /// A not-yet-funded outbound (from holder) channel using V1 channel establishment.
6922 pub(super) struct OutboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
6923         pub context: ChannelContext<SP>,
6924         pub unfunded_context: UnfundedChannelContext,
6925 }
6926
6927 impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider {
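        /// Creates the state for a new outbound channel, validating the requested channel value,
        /// push amount, and handshake config, and deriving fresh channel keys via the signer
        /// provider. Returns an `APIError` rather than panicking on unusable parameters.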
6928         pub fn new<ES: Deref, F: Deref>(
6929                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP, counterparty_node_id: PublicKey, their_features: &InitFeatures,
6930                 channel_value_satoshis: u64, push_msat: u64, user_id: u128, config: &UserConfig, current_chain_height: u32,
6931                 outbound_scid_alias: u64, temporary_channel_id: Option<ChannelId>
6932         ) -> Result<OutboundV1Channel<SP>, APIError>
6933         where ES::Target: EntropySource,
6934               F::Target: FeeEstimator
6935         {
6936                 let holder_selected_contest_delay = config.channel_handshake_config.our_to_self_delay;
6937                 let channel_keys_id = signer_provider.generate_channel_keys_id(false, channel_value_satoshis, user_id);
6938                 let holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
6939                 let pubkeys = holder_signer.pubkeys().clone();
6940
6941                 if !their_features.supports_wumbo() && channel_value_satoshis > MAX_FUNDING_SATOSHIS_NO_WUMBO {
6942                         return Err(APIError::APIMisuseError{err: format!("funding_value must not exceed {}, it was {}", MAX_FUNDING_SATOSHIS_NO_WUMBO, channel_value_satoshis)});
6943                 }
6944                 if channel_value_satoshis >= TOTAL_BITCOIN_SUPPLY_SATOSHIS {
6945                         return Err(APIError::APIMisuseError{err: format!("funding_value must be smaller than the total bitcoin supply, it was {}", channel_value_satoshis)});
6946                 }
6947                 let channel_value_msat = channel_value_satoshis * 1000;
6948                 if push_msat > channel_value_msat {
6949                         return Err(APIError::APIMisuseError { err: format!("Push value ({}) was larger than channel_value ({})", push_msat, channel_value_msat) });
6950                 }
6951                 if holder_selected_contest_delay < BREAKDOWN_TIMEOUT {
6952                         return Err(APIError::APIMisuseError {err: format!("Configured with an unreasonable our_to_self_delay ({}) putting user funds at risk", holder_selected_contest_delay)});
6953                 }
6954                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(channel_value_satoshis, config);
6955                 if holder_selected_channel_reserve_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
6956                         // Protocol level safety check in place, although it should never happen because
6957                         // of `MIN_THEIR_CHAN_RESERVE_SATOSHIS`
6958                         return Err(APIError::APIMisuseError { err: format!("Holder selected channel reserve ({}) is below the implementation dust limit", holder_selected_channel_reserve_satoshis) });
6959                 }
6960
6961                 let channel_type = Self::get_initial_channel_type(&config, their_features);
6962                 debug_assert!(channel_type.is_subset(&channelmanager::provided_channel_type_features(&config)));
6963
6964                 let (commitment_conf_target, anchor_outputs_value_msat) = if channel_type.supports_anchors_zero_fee_htlc_tx() {
6965                         (ConfirmationTarget::AnchorChannelFee, ANCHOR_OUTPUT_VALUE_SATOSHI * 2 * 1000)
6966                 } else {
6967                         (ConfirmationTarget::NonAnchorChannelFee, 0)
6968                 };
6969                 let commitment_feerate = fee_estimator.bounded_sat_per_1000_weight(commitment_conf_target);
6970
6971                 let value_to_self_msat = channel_value_satoshis * 1000 - push_msat;
6972                 let commitment_tx_fee = commit_tx_fee_msat(commitment_feerate, MIN_AFFORDABLE_HTLC_COUNT, &channel_type);
6973                 if value_to_self_msat.saturating_sub(anchor_outputs_value_msat) < commitment_tx_fee {
6974                         return Err(APIError::APIMisuseError{ err: format!("Funding amount ({}) can't even pay the fee for the initial commitment transaction ({}).", value_to_self_msat / 1000, commitment_tx_fee / 1000) });
6975                 }
6976
6977                 let mut secp_ctx = Secp256k1::new();
6978                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
6979
6980                 let shutdown_scriptpubkey = if config.channel_handshake_config.commit_upfront_shutdown_pubkey {
6981                         match signer_provider.get_shutdown_scriptpubkey() {
6982                                 Ok(scriptpubkey) => Some(scriptpubkey),
6983                                 Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get shutdown scriptpubkey".to_owned()}),
6984                         }
6985                 } else { None };
6986
6987                 if let Some(shutdown_scriptpubkey) = &shutdown_scriptpubkey {
6988                         if !shutdown_scriptpubkey.is_compatible(&their_features) {
6989                                 return Err(APIError::IncompatibleShutdownScript { script: shutdown_scriptpubkey.clone() });
6990                         }
6991                 }
6992
6993                 let destination_script = match signer_provider.get_destination_script(channel_keys_id) {
6994                         Ok(script) => script,
6995                         Err(_) => return Err(APIError::ChannelUnavailable { err: "Failed to get destination script".to_owned()}),
6996                 };
6997
6998                 let temporary_channel_id = temporary_channel_id.unwrap_or_else(|| ChannelId::temporary_from_entropy_source(entropy_source));
6999
7000                 Ok(Self {
7001                         context: ChannelContext {
7002                                 user_id,
7003
7004                                 config: LegacyChannelConfig {
7005                                         options: config.channel_config.clone(),
7006                                         announced_channel: config.channel_handshake_config.announced_channel,
7007                                         commit_upfront_shutdown_pubkey: config.channel_handshake_config.commit_upfront_shutdown_pubkey,
7008                                 },
7009
7010                                 prev_config: None,
7011
7012                                 inbound_handshake_limits_override: Some(config.channel_handshake_limits.clone()),
7013
7014                                 channel_id: temporary_channel_id,
7015                                 temporary_channel_id: Some(temporary_channel_id),
7016                                 channel_state: ChannelState::NegotiatingFunding(NegotiatingFundingFlags::OUR_INIT_SENT),
7017                                 announcement_sigs_state: AnnouncementSigsState::NotSent,
7018                                 secp_ctx,
7019                                 channel_value_satoshis,
7020
7021                                 latest_monitor_update_id: 0,
7022
7023                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
7024                                 shutdown_scriptpubkey,
7025                                 destination_script,
7026
7027                                 cur_holder_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7028                                 cur_counterparty_commitment_transaction_number: INITIAL_COMMITMENT_NUMBER,
7029                                 value_to_self_msat,
7030
7031                                 pending_inbound_htlcs: Vec::new(),
7032                                 pending_outbound_htlcs: Vec::new(),
7033                                 holding_cell_htlc_updates: Vec::new(),
7034                                 pending_update_fee: None,
7035                                 holding_cell_update_fee: None,
7036                                 next_holder_htlc_id: 0,
7037                                 next_counterparty_htlc_id: 0,
7038                                 update_time_counter: 1,
7039
7040                                 resend_order: RAACommitmentOrder::CommitmentFirst,
7041
7042                                 monitor_pending_channel_ready: false,
7043                                 monitor_pending_revoke_and_ack: false,
7044                                 monitor_pending_commitment_signed: false,
7045                                 monitor_pending_forwards: Vec::new(),
7046                                 monitor_pending_failures: Vec::new(),
7047                                 monitor_pending_finalized_fulfills: Vec::new(),
7048
7049                                 signer_pending_commitment_update: false,
7050                                 signer_pending_funding: false,
7051
7052                                 #[cfg(debug_assertions)]
7053                                 holder_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
7054                                 #[cfg(debug_assertions)]
7055                                 counterparty_max_commitment_tx_output: Mutex::new((channel_value_satoshis * 1000 - push_msat, push_msat)),
7056
7057                                 last_sent_closing_fee: None,
7058                                 pending_counterparty_closing_signed: None,
7059                                 expecting_peer_commitment_signed: false,
7060                                 closing_fee_limits: None,
7061                                 target_closing_feerate_sats_per_kw: None,
7062
7063                                 funding_tx_confirmed_in: None,
7064                                 funding_tx_confirmation_height: 0,
7065                                 short_channel_id: None,
7066                                 channel_creation_height: current_chain_height,
7067
7068                                 feerate_per_kw: commitment_feerate,
7069                                 counterparty_dust_limit_satoshis: 0,
7070                                 holder_dust_limit_satoshis: MIN_CHAN_DUST_LIMIT_SATOSHIS,
7071                                 counterparty_max_htlc_value_in_flight_msat: 0,
7072                                 holder_max_htlc_value_in_flight_msat: get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &config.channel_handshake_config),
7073                                 counterparty_selected_channel_reserve_satoshis: None, // Filled in during accept_channel
7074                                 holder_selected_channel_reserve_satoshis,
7075                                 counterparty_htlc_minimum_msat: 0,
7076                                 holder_htlc_minimum_msat: if config.channel_handshake_config.our_htlc_minimum_msat == 0 { 1 } else { config.channel_handshake_config.our_htlc_minimum_msat },
7077                                 counterparty_max_accepted_htlcs: 0,
7078                                 holder_max_accepted_htlcs: cmp::min(config.channel_handshake_config.our_max_accepted_htlcs, MAX_HTLCS),
7079                                 minimum_depth: None, // Filled in during accept_channel
7080
7081                                 counterparty_forwarding_info: None,
7082
7083                                 channel_transaction_parameters: ChannelTransactionParameters {
7084                                         holder_pubkeys: pubkeys,
7085                                         holder_selected_contest_delay: config.channel_handshake_config.our_to_self_delay,
7086                                         is_outbound_from_holder: true,
7087                                         counterparty_parameters: None,
7088                                         funding_outpoint: None,
7089                                         channel_type_features: channel_type.clone()
7090                                 },
7091                                 funding_transaction: None,
7092                                 is_batch_funding: None,
7093
7094                                 counterparty_cur_commitment_point: None,
7095                                 counterparty_prev_commitment_point: None,
7096                                 counterparty_node_id,
7097
7098                                 counterparty_shutdown_scriptpubkey: None,
7099
7100                                 commitment_secrets: CounterpartyCommitmentSecrets::new(),
7101
7102                                 channel_update_status: ChannelUpdateStatus::Enabled,
7103                                 closing_signed_in_flight: false,
7104
7105                                 announcement_sigs: None,
7106
7107                                 #[cfg(any(test, fuzzing))]
7108                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
7109                                 #[cfg(any(test, fuzzing))]
7110                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
7111
7112                                 workaround_lnd_bug_4006: None,
7113                                 sent_message_awaiting_response: None,
7114
7115                                 latest_inbound_scid_alias: None,
7116                                 outbound_scid_alias,
7117
7118                                 channel_pending_event_emitted: false,
7119                                 channel_ready_event_emitted: false,
7120
7121                                 #[cfg(any(test, fuzzing))]
7122                                 historical_inbound_htlc_fulfills: new_hash_set(),
7123
7124                                 channel_type,
7125                                 channel_keys_id,
7126
7127                                 blocked_monitor_updates: Vec::new(),
7128                                 local_initiated_shutdown: None,
7129                         },
7130                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7131                 })
7132         }
7133
7134         /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set.
7135         fn get_funding_created_msg<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7136                 let counterparty_keys = self.context.build_remote_transaction_keys();
7137                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7138                 let signature = match &self.context.holder_signer {
7139                         // TODO (taproot|arik): move match into calling method for Taproot
7140                         ChannelSignerType::Ecdsa(ecdsa) => {
7141                                 ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx)
7142                                         .map(|(sig, _)| sig).ok()?
7143                         },
7144                         // TODO (taproot|arik)
7145                         #[cfg(taproot)]
7146                         _ => todo!()
7147                 };
7148
7149                 if self.context.signer_pending_funding {
7150                         log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding");
7151                         self.context.signer_pending_funding = false;
7152                 }
7153
7154                 Some(msgs::FundingCreated {
7155                         temporary_channel_id: self.context.temporary_channel_id.unwrap(),
7156                         funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid,
7157                         funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index,
7158                         signature,
7159                         #[cfg(taproot)]
7160                         partial_signature_with_nonce: None,
7161                         #[cfg(taproot)]
7162                         next_local_nonce: None,
7163                 })
7164         }
7165
7166         /// Updates channel state with knowledge of the funding transaction's txid/index, and generates
7167         /// a funding_created message for the remote peer.
7168         /// Panics if called at some time other than immediately after initial handshake, if called twice,
7169         /// or if called on an inbound channel.
7170         /// Note that channel_id changes during this call!
7171         /// Do NOT broadcast the funding transaction until after a successful funding_signed call!
7172         /// If an Err is returned, it is a ChannelError::Close.
7173         pub fn get_funding_created<L: Deref>(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L)
7174         -> Result<Option<msgs::FundingCreated>, (Self, ChannelError)> where L::Target: Logger {
7175                 if !self.context.is_outbound() {
7176                         panic!("Tried to create outbound funding_created message on an inbound channel!");
7177                 }
7178                 if !matches!(
7179                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7180                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7181                 ) {
7182                         panic!("Tried to get a funding_created message at a time other than immediately after initial handshake completion (or tried to get funding_created twice)");
7183                 }
7184                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7185                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7186                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7187                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7188                 }
7189
7190                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7191                 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7192
7193                 // Now that we're past error-generating stuff, update our local state:
7194
7195                 self.context.channel_state = ChannelState::FundingNegotiated;
7196                 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7197
7198                 // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100.
7199                 // We can skip this if it is a zero-conf channel.
7200                 if funding_transaction.is_coin_base() &&
7201                         self.context.minimum_depth.unwrap_or(0) > 0 &&
7202                         self.context.minimum_depth.unwrap_or(0) < COINBASE_MATURITY {
7203                         self.context.minimum_depth = Some(COINBASE_MATURITY);
7204                 }
7205
7206                 self.context.funding_transaction = Some(funding_transaction);
7207                 self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding);
7208
7209                 let funding_created = self.get_funding_created_msg(logger);
7210                 if funding_created.is_none() {
7211                         #[cfg(not(async_signing))] {
7212                                 panic!("Failed to get signature for new funding creation");
7213                         }
7214                         #[cfg(async_signing)] {
7215                                 if !self.context.signer_pending_funding {
7216                                         log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding");
7217                                         self.context.signer_pending_funding = true;
7218                                 }
7219                         }
7220                 }
7221
7222                 Ok(funding_created)
7223         }
7224
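        /// Picks the channel type we initially propose, based on our config and the features the
        /// counterparty advertised in their `init` message.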
7225         fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures {
7226                 // The default channel type (i.e. the first one we try) depends on whether the channel
7227                 // is public - if it is, we just go with `only_static_remotekey` as it's the only option
7228                 // available. If it's private, we first try `scid_privacy` as it provides better privacy
7229                 // with no other changes, and fall back to `only_static_remotekey`.
7230                 let mut ret = ChannelTypeFeatures::only_static_remote_key();
7231                 if !config.channel_handshake_config.announced_channel &&
7232                         config.channel_handshake_config.negotiate_scid_privacy &&
7233                         their_features.supports_scid_privacy() {
7234                         ret.set_scid_privacy_required();
7235                 }
7236
7237                 // Optionally, if the user would like to negotiate the `anchors_zero_fee_htlc_tx` option, we
7238                 // set it now. If they don't understand it, we'll fall back to our default of
7239                 // `only_static_remotekey`.
7240                 if config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx &&
7241                         their_features.supports_anchors_zero_fee_htlc_tx() {
7242                         ret.set_anchors_zero_fee_htlc_tx_required();
7243                 }
7244
7245                 ret
7246         }
7247
7248         /// If we receive an error message, it may only be a rejection of the channel type we tried,
7249         /// not of our ability to open any channel at all. Thus, on error, we should first call this
7250         /// and see if we get a new `OpenChannel` message; otherwise, the channel is failed.
7251         pub(crate) fn maybe_handle_error_without_close<F: Deref>(
7252                 &mut self, chain_hash: ChainHash, fee_estimator: &LowerBoundedFeeEstimator<F>
7253         ) -> Result<msgs::OpenChannel, ()>
7254         where
7255                 F::Target: FeeEstimator
7256         {
7257                 if !self.context.is_outbound() ||
7258                         !matches!(
7259                                 self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7260                                 if flags == NegotiatingFundingFlags::OUR_INIT_SENT
7261                         )
7262                 {
7263                         return Err(());
7264                 }
7265                 if self.context.channel_type == ChannelTypeFeatures::only_static_remote_key() {
7266                         // We've exhausted our options
7267                         return Err(());
7268                 }
7269                 // We support opening a few different types of channels. Try removing our additional
7270                 // features one by one until we've either arrived at our default or the counterparty has
7271                 // accepted one.
7272                 //
7273                 // Due to the order below, we may not negotiate `option_anchors_zero_fee_htlc_tx` if the
7274                 // counterparty doesn't support `option_scid_privacy`. Since `get_initial_channel_type`
7275                 // checks whether the counterparty supports every feature, this would only happen if the
7276                 // counterparty is advertising the feature, but rejecting channels proposing the feature for
7277                 // whatever reason.
7278                 if self.context.channel_type.supports_anchors_zero_fee_htlc_tx() {
7279                         self.context.channel_type.clear_anchors_zero_fee_htlc_tx();
7280                         self.context.feerate_per_kw = fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::NonAnchorChannelFee);
7281                         assert!(!self.context.channel_transaction_parameters.channel_type_features.supports_anchors_nonzero_fee_htlc_tx());
7282                 } else if self.context.channel_type.supports_scid_privacy() {
7283                         self.context.channel_type.clear_scid_privacy();
7284                 } else {
7285                         self.context.channel_type = ChannelTypeFeatures::only_static_remote_key();
7286                 }
7287                 self.context.channel_transaction_parameters.channel_type_features = self.context.channel_type.clone();
7288                 Ok(self.get_open_channel(chain_hash))
7289         }
7290
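        /// Builds the initial `open_channel` message from our handshake parameters. Panics if
        /// this is not an outbound channel or if the handshake has already progressed past the
        /// point where `open_channel` may be sent.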
7291         pub fn get_open_channel(&self, chain_hash: ChainHash) -> msgs::OpenChannel {
7292                 if !self.context.is_outbound() {
7293                         panic!("Tried to open a channel for an inbound channel?");
7294                 }
7295                 if self.context.have_received_message() {
7296                         panic!("Cannot generate an open_channel after we've moved forward");
7297                 }
7298
7299                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7300                         panic!("Tried to send an open_channel for a channel that has already advanced");
7301                 }
7302
7303                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7304                 let keys = self.context.get_holder_pubkeys();
7305
7306                 msgs::OpenChannel {
7307                         common_fields: msgs::CommonOpenChannelFields {
7308                                 chain_hash,
7309                                 temporary_channel_id: self.context.channel_id,
7310                                 funding_satoshis: self.context.channel_value_satoshis,
7311                                 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7312                                 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7313                                 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7314                                 commitment_feerate_sat_per_1000_weight: self.context.feerate_per_kw as u32,
7315                                 to_self_delay: self.context.get_holder_selected_contest_delay(),
7316                                 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7317                                 funding_pubkey: keys.funding_pubkey,
7318                                 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7319                                 payment_basepoint: keys.payment_point,
7320                                 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7321                                 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7322                                 first_per_commitment_point,
7323                                 channel_flags: if self.context.config.announced_channel {1} else {0},
7324                                 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7325                                         Some(script) => script.clone().into_inner(),
7326                                         None => Builder::new().into_script(),
7327                                 }),
7328                                 channel_type: Some(self.context.channel_type.clone()),
7329                         },
7330                         push_msat: self.context.channel_value_satoshis * 1000 - self.context.value_to_self_msat,
7331                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7332                 }
7333         }
7334
7335         // Message handlers
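        /// Handles an incoming `accept_channel` message, checking the counterparty's parameters
        /// against both BOLT 2 requirements and our user-configured handshake limits before
        /// adopting them.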
7336         pub fn accept_channel(&mut self, msg: &msgs::AcceptChannel, default_limits: &ChannelHandshakeLimits, their_features: &InitFeatures) -> Result<(), ChannelError> {
7337                 let peer_limits = if let Some(ref limits) = self.context.inbound_handshake_limits_override { limits } else { default_limits };
7338
7339                 // Check sanity of message fields:
7340                 if !self.context.is_outbound() {
7341                         return Err(ChannelError::Close("Got an accept_channel message from an inbound peer".to_owned()));
7342                 }
7343                 if !matches!(self.context.channel_state, ChannelState::NegotiatingFunding(flags) if flags == NegotiatingFundingFlags::OUR_INIT_SENT) {
7344                         return Err(ChannelError::Close("Got an accept_channel message at a strange time".to_owned()));
7345                 }
7346                 if msg.common_fields.dust_limit_satoshis > 21000000 * 100000000 {
7347                         return Err(ChannelError::Close(format!("Peer never wants payout outputs? dust_limit_satoshis was {}", msg.common_fields.dust_limit_satoshis)));
7348                 }
7349                 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis {
7350                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than ({})", msg.channel_reserve_satoshis, self.context.channel_value_satoshis)));
7351                 }
7352                 if msg.common_fields.dust_limit_satoshis > self.context.holder_selected_channel_reserve_satoshis {
7353                         return Err(ChannelError::Close(format!("Dust limit ({}) is bigger than our channel reserve ({})", msg.common_fields.dust_limit_satoshis, self.context.holder_selected_channel_reserve_satoshis)));
7354                 }
7355                 if msg.channel_reserve_satoshis > self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis {
7356                         return Err(ChannelError::Close(format!("Bogus channel_reserve_satoshis ({}). Must not be greater than channel value minus our reserve ({})",
7357                                 msg.channel_reserve_satoshis, self.context.channel_value_satoshis - self.context.holder_selected_channel_reserve_satoshis)));
7358                 }
7359                 let full_channel_value_msat = (self.context.channel_value_satoshis - msg.channel_reserve_satoshis) * 1000;
7360                 if msg.common_fields.htlc_minimum_msat >= full_channel_value_msat {
7361                         return Err(ChannelError::Close(format!("Minimum htlc value ({}) is full channel value ({})", msg.common_fields.htlc_minimum_msat, full_channel_value_msat)));
7362                 }
7363                 let max_delay_acceptable = u16::min(peer_limits.their_to_self_delay, MAX_LOCAL_BREAKDOWN_TIMEOUT);
7364                 if msg.common_fields.to_self_delay > max_delay_acceptable {
7365                         return Err(ChannelError::Close(format!("They wanted our payments to be delayed by a needlessly long period. Upper limit: {}. Actual: {}", max_delay_acceptable, msg.common_fields.to_self_delay)));
7366                 }
7367                 if msg.common_fields.max_accepted_htlcs < 1 {
7368                         return Err(ChannelError::Close("0 max_accepted_htlcs makes for a useless channel".to_owned()));
7369                 }
7370                 if msg.common_fields.max_accepted_htlcs > MAX_HTLCS {
7371                         return Err(ChannelError::Close(format!("max_accepted_htlcs was {}. It must not be larger than {}", msg.common_fields.max_accepted_htlcs, MAX_HTLCS)));
7372                 }
7373
7374                 // Now check against optional parameters as set by config...
7375                 if msg.common_fields.htlc_minimum_msat > peer_limits.max_htlc_minimum_msat {
7376                         return Err(ChannelError::Close(format!("htlc_minimum_msat ({}) is higher than the user specified limit ({})", msg.common_fields.htlc_minimum_msat, peer_limits.max_htlc_minimum_msat)));
7377                 }
7378                 if msg.common_fields.max_htlc_value_in_flight_msat < peer_limits.min_max_htlc_value_in_flight_msat {
7379                         return Err(ChannelError::Close(format!("max_htlc_value_in_flight_msat ({}) is less than the user specified limit ({})", msg.common_fields.max_htlc_value_in_flight_msat, peer_limits.min_max_htlc_value_in_flight_msat)));
7380                 }
7381                 if msg.channel_reserve_satoshis > peer_limits.max_channel_reserve_satoshis {
7382                         return Err(ChannelError::Close(format!("channel_reserve_satoshis ({}) is higher than the user specified limit ({})", msg.channel_reserve_satoshis, peer_limits.max_channel_reserve_satoshis)));
7383                 }
7384                 if msg.common_fields.max_accepted_htlcs < peer_limits.min_max_accepted_htlcs {
7385                         return Err(ChannelError::Close(format!("max_accepted_htlcs ({}) is less than the user specified limit ({})", msg.common_fields.max_accepted_htlcs, peer_limits.min_max_accepted_htlcs)));
7386                 }
7387                 if msg.common_fields.dust_limit_satoshis < MIN_CHAN_DUST_LIMIT_SATOSHIS {
7388                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is less than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MIN_CHAN_DUST_LIMIT_SATOSHIS)));
7389                 }
7390                 if msg.common_fields.dust_limit_satoshis > MAX_CHAN_DUST_LIMIT_SATOSHIS {
7391                         return Err(ChannelError::Close(format!("dust_limit_satoshis ({}) is greater than the implementation limit ({})", msg.common_fields.dust_limit_satoshis, MAX_CHAN_DUST_LIMIT_SATOSHIS)));
7392                 }
7393                 if msg.common_fields.minimum_depth > peer_limits.max_minimum_depth {
7394                         return Err(ChannelError::Close(format!("We consider the minimum depth to be unreasonably large. Expected minimum: ({}). Actual: ({})", peer_limits.max_minimum_depth, msg.common_fields.minimum_depth)));
7395                 }
7396
7397                 if let Some(ty) = &msg.common_fields.channel_type {
7398                         if *ty != self.context.channel_type {
7399                                 return Err(ChannelError::Close("Channel Type in accept_channel didn't match the one sent in open_channel.".to_owned()));
7400                         }
7401                 } else if their_features.supports_channel_type() {
7402                         // Assume they've accepted the channel type as they said they understand it.
7403                 } else {
7404                         let channel_type = ChannelTypeFeatures::from_init(&their_features);
7405                         if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7406                                 return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7407                         }
7408                         self.context.channel_type = channel_type.clone();
7409                         self.context.channel_transaction_parameters.channel_type_features = channel_type;
7410                 }
7411
7412                 let counterparty_shutdown_scriptpubkey = if their_features.supports_upfront_shutdown_script() {
7413                         match &msg.common_fields.shutdown_scriptpubkey {
7414                                 &Some(ref script) => {
7415                                         // Peer is signaling upfront_shutdown and has opted out with a 0-length script. We don't enforce anything
7416                                         if script.len() == 0 {
7417                                                 None
7418                                         } else {
7419                                                 if !script::is_bolt2_compliant(&script, their_features) {
7420                                                         return Err(ChannelError::Close(format!("Peer is signaling upfront_shutdown but has provided an unacceptable scriptpubkey format: {}", script)));
7421                                                 }
7422                                                 Some(script.clone())
7423                                         }
7424                                 },
7425                                 // Peer is signaling upfront shutdown but didn't opt out via the correct mechanism (a 0-length script). The peer looks buggy, so we fail the channel
7426                                 &None => {
7427                                         return Err(ChannelError::Close("Peer is signaling upfront_shutdown but we don't get any script. Use 0-length script to opt-out".to_owned()));
7428                                 }
7429                         }
7430                 } else { None };
7431
7432                 self.context.counterparty_dust_limit_satoshis = msg.common_fields.dust_limit_satoshis;
7433                 self.context.counterparty_max_htlc_value_in_flight_msat = cmp::min(msg.common_fields.max_htlc_value_in_flight_msat, self.context.channel_value_satoshis * 1000);
7434                 self.context.counterparty_selected_channel_reserve_satoshis = Some(msg.channel_reserve_satoshis);
7435                 self.context.counterparty_htlc_minimum_msat = msg.common_fields.htlc_minimum_msat;
7436                 self.context.counterparty_max_accepted_htlcs = msg.common_fields.max_accepted_htlcs;
7437
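                // If we trust our own funding transaction enough for 0conf, accept the peer's
                // minimum_depth as-is (it may be 0); otherwise require at least one confirmation.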
7438                 if peer_limits.trust_own_funding_0conf {
7439                         self.context.minimum_depth = Some(msg.common_fields.minimum_depth);
7440                 } else {
7441                         self.context.minimum_depth = Some(cmp::max(1, msg.common_fields.minimum_depth));
7442                 }
7443
7444                 let counterparty_pubkeys = ChannelPublicKeys {
7445                         funding_pubkey: msg.common_fields.funding_pubkey,
7446                         revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7447                         payment_point: msg.common_fields.payment_basepoint,
7448                         delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7449                         htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7450                 };
7451
7452                 self.context.channel_transaction_parameters.counterparty_parameters = Some(CounterpartyChannelTransactionParameters {
7453                         selected_contest_delay: msg.common_fields.to_self_delay,
7454                         pubkeys: counterparty_pubkeys,
7455                 });
7456
7457                 self.context.counterparty_cur_commitment_point = Some(msg.common_fields.first_per_commitment_point);
7458                 self.context.counterparty_shutdown_scriptpubkey = counterparty_shutdown_scriptpubkey;
7459
7460                 self.context.channel_state = ChannelState::NegotiatingFunding(
7461                         NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT
7462                 );
7463                 self.context.inbound_handshake_limits_override = None; // We're done enforcing limits on our peer's handshake now.
7464
7465                 Ok(())
7466         }
7467
7468         /// Handles a funding_signed message from the remote end.
7469         /// If this call is successful, broadcast the funding transaction (and not before!)
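        ///
        /// A minimal caller-side sketch (the `pending_channel`, `msg`, `best_block`,
        /// `signer_provider` and `logger` bindings are hypothetical; not a runnable doctest):
        ///
        /// ```ignore
        /// match pending_channel.funding_signed(&msg, best_block, &signer_provider, &logger) {
        ///     Ok((channel, channel_monitor)) => {
        ///         // Persist `channel_monitor` first, then broadcast the funding transaction.
        ///     },
        ///     Err((unfunded_channel, err)) => {
        ///         // Still an `OutboundV1Channel`; surface `err` and do not broadcast.
        ///     },
        /// }
        /// ```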
7470         pub fn funding_signed<L: Deref>(
7471                 mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L
7472         ) -> Result<(Channel<SP>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (OutboundV1Channel<SP>, ChannelError)>
7473         where
7474                 L::Target: Logger
7475         {
7476                 if !self.context.is_outbound() {
7477                         return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())));
7478                 }
7479                 if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) {
7480                         return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned())));
7481                 }
7482                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7483                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7484                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7485                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7486                 }
7487
7488                 let funding_script = self.context.get_funding_redeemscript();
7489
7490                 let counterparty_keys = self.context.build_remote_transaction_keys();
7491                 let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx;
7492                 let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust();
7493                 let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction();
7494
7495                 log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}",
7496                         &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction));
7497
7498                 let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7499                 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx;
7500                 {
7501                         let trusted_tx = initial_commitment_tx.trust();
7502                         let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7503                         let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7504                         // They sign our commitment transaction, allowing us to broadcast the tx if we wish.
7505                         if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) {
7506                                 return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned())));
7507                         }
7508                 }
7509
7510                 let holder_commitment_tx = HolderCommitmentTransaction::new(
7511                         initial_commitment_tx,
7512                         msg.signature,
7513                         Vec::new(),
7514                         &self.context.get_holder_pubkeys().funding_pubkey,
7515                         self.context.counterparty_funding_pubkey()
7516                 );
7517
7518                 let validated =
7519                         self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new());
7520                 if validated.is_err() {
7521                         return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7522                 }
7523
7524                 let funding_redeemscript = self.context.get_funding_redeemscript();
7525                 let funding_txo = self.context.get_funding_txo().unwrap();
7526                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
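                // Per BOLT 3, commitment numbers are obscured by XORing them with the lower 48
                // bits of SHA256(open_channel payment_basepoint || accept_channel payment_basepoint).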
7527                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7528                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7529                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7530                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7531                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7532                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
7533                                                           &self.context.destination_script, (funding_txo, funding_txo_script),
7534                                                           &self.context.channel_transaction_parameters,
7535                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
7536                                                           obscure_factor,
7537                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7538                 channel_monitor.provide_initial_counterparty_commitment_tx(
7539                         counterparty_initial_bitcoin_tx.txid, Vec::new(),
7540                         self.context.cur_counterparty_commitment_transaction_number,
7541                         self.context.counterparty_cur_commitment_point.unwrap(),
7542                         counterparty_initial_commitment_tx.feerate_per_kw(),
7543                         counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7544                         counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7545
7546                 assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have not had any monitors yet to fail updates!
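                // When this channel is part of a batch funding transaction, hold it in
                // AwaitingChannelReady until the rest of the batch is ready, as the shared
                // funding transaction must not be broadcast early.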
7547                 if self.context.is_batch_funding() {
7548                         self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH);
7549                 } else {
7550                         self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7551                 }
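                // Commitment transaction numbers count down from INITIAL_COMMITMENT_NUMBER
                // (2^48 - 1), so advancing past the initial commitment means decrementing.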
7552                 self.context.cur_holder_commitment_transaction_number -= 1;
7553                 self.context.cur_counterparty_commitment_transaction_number -= 1;
7554
7555                 log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id());
7556
7557                 let mut channel = Channel { context: self.context };
7558
7559                 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7560                 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7561                 Ok((channel, channel_monitor))
7562         }
7563
7564         /// Indicates that the signer may have some signatures for us, so we should retry if we're
7565         /// blocked.
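        ///
        /// A caller-side sketch (hypothetical `channel` and `logger` bindings; not a runnable
        /// doctest):
        ///
        /// ```ignore
        /// if let Some(funding_created) = channel.signer_maybe_unblocked(&logger) {
        ///     // The signer caught up; send the now-available `funding_created` to the peer.
        /// }
        /// ```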
7566         #[cfg(async_signing)]
7567         pub fn signer_maybe_unblocked<L: Deref>(&mut self, logger: &L) -> Option<msgs::FundingCreated> where L::Target: Logger {
7568                 if self.context.signer_pending_funding && self.context.is_outbound() {
7569                         log_trace!(logger, "Signer unblocked a funding_created");
7570                         self.get_funding_created_msg(logger)
7571                 } else { None }
7572         }
7573 }
7574
7575 /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment.
7576 pub(super) struct InboundV1Channel<SP: Deref> where SP::Target: SignerProvider {
7577         pub context: ChannelContext<SP>,
7578         pub unfunded_context: UnfundedChannelContext,
7579 }
7580
7581 /// Fetches the [`ChannelTypeFeatures`] that will be used for a channel built from a given
7582 /// [`msgs::OpenChannel`].
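///
/// A usage sketch (hypothetical `msg`, `their_features` and `our_supported_features`
/// bindings; not a runnable doctest):
///
/// ```ignore
/// let channel_type = channel_type_from_open_channel(&msg, &their_features, &our_supported_features)?;
/// // On success the negotiated type always requires `static_remote_key`.
/// assert!(channel_type.requires_static_remote_key());
/// ```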
7583 pub(super) fn channel_type_from_open_channel(
7584         msg: &msgs::OpenChannel, their_features: &InitFeatures,
7585         our_supported_features: &ChannelTypeFeatures
7586 ) -> Result<ChannelTypeFeatures, ChannelError> {
7587         if let Some(channel_type) = &msg.common_fields.channel_type {
7588                 if channel_type.supports_any_optional_bits() {
7589                         return Err(ChannelError::Close("Channel Type field contained optional bits - this is not allowed".to_owned()));
7590                 }
7591
7592                 // We only support the channel types defined by the `ChannelManager` in
7593                 // `provided_channel_type_features`. The channel type must always support
7594                 // `static_remote_key`.
7595                 if !channel_type.requires_static_remote_key() {
7596                         return Err(ChannelError::Close("Channel Type was not understood - we require static remote key".to_owned()));
7597                 }
7598                 // Make sure we support all of the features behind the channel type.
7599                 if !channel_type.is_subset(our_supported_features) {
7600                         return Err(ChannelError::Close("Channel Type contains unsupported features".to_owned()));
7601                 }
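                // Only the least-significant bit of channel_flags is currently defined:
                // announce_channel (BOLT 2).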
7602                 let announced_channel = (msg.common_fields.channel_flags & 1) == 1;
7603                 if channel_type.requires_scid_privacy() && announced_channel {
7604                         return Err(ChannelError::Close("SCID Alias/Privacy Channel Type cannot be set on a public channel".to_owned()));
7605                 }
7606                 Ok(channel_type.clone())
7607         } else {
7608                 let channel_type = ChannelTypeFeatures::from_init(&their_features);
7609                 if channel_type != ChannelTypeFeatures::only_static_remote_key() {
7610                         return Err(ChannelError::Close("Only static_remote_key is supported for non-negotiated channel types".to_owned()));
7611                 }
7612                 Ok(channel_type)
7613         }
7614 }
7615
7616 impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider {
7617         /// Creates a new channel from a remote side's request for one.
7618         /// Assumes chain_hash has already been checked and corresponds with what we expect!
7619         pub fn new<ES: Deref, F: Deref, L: Deref>(
7620                 fee_estimator: &LowerBoundedFeeEstimator<F>, entropy_source: &ES, signer_provider: &SP,
7621                 counterparty_node_id: PublicKey, our_supported_features: &ChannelTypeFeatures,
7622                 their_features: &InitFeatures, msg: &msgs::OpenChannel, user_id: u128, config: &UserConfig,
7623                 current_chain_height: u32, logger: &L, is_0conf: bool,
7624         ) -> Result<InboundV1Channel<SP>, ChannelError>
7625                 where ES::Target: EntropySource,
7626                           F::Target: FeeEstimator,
7627                           L::Target: Logger,
7628         {
7629                 let logger = WithContext::from(logger, Some(counterparty_node_id), Some(msg.common_fields.temporary_channel_id));
7630
7631                 // First check the channel type is known, failing before we do anything else if we don't
7632                 // support this channel type.
7633                 let channel_type = channel_type_from_open_channel(msg, their_features, our_supported_features)?;
7634
7635                 let holder_selected_channel_reserve_satoshis = get_holder_selected_channel_reserve_satoshis(msg.common_fields.funding_satoshis, config);
7636                 let counterparty_pubkeys = ChannelPublicKeys {
7637                         funding_pubkey: msg.common_fields.funding_pubkey,
7638                         revocation_basepoint: RevocationBasepoint::from(msg.common_fields.revocation_basepoint),
7639                         payment_point: msg.common_fields.payment_basepoint,
7640                         delayed_payment_basepoint: DelayedPaymentBasepoint::from(msg.common_fields.delayed_payment_basepoint),
7641                         htlc_basepoint: HtlcBasepoint::from(msg.common_fields.htlc_basepoint)
7642                 };
7643
7644                 let chan = Self {
7645                         context: ChannelContext::new_for_inbound_channel(
7646                                 fee_estimator,
7647                                 entropy_source,
7648                                 signer_provider,
7649                                 counterparty_node_id,
7650                                 their_features,
7651                                 user_id,
7652                                 config,
7653                                 current_chain_height,
7654                                 &&logger,
7655                                 is_0conf,
7656                                 0,
7657
7658                                 counterparty_pubkeys,
7659                                 channel_type,
7660                                 holder_selected_channel_reserve_satoshis,
7661                                 msg.channel_reserve_satoshis,
7662                                 msg.push_msat,
7663                                 msg.common_fields.clone(),
7664                         )?,
7665                         unfunded_context: UnfundedChannelContext { unfunded_channel_age_ticks: 0 }
7666                 };
7667                 Ok(chan)
7668         }
7669
7670         /// Marks an inbound channel as accepted and generates a [`msgs::AcceptChannel`] message which
7671         /// should be sent back to the counterparty node.
7672         ///
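        /// A usage sketch (hypothetical `inbound_channel` binding; not a runnable doctest):
        ///
        /// ```ignore
        /// let accept_channel_msg = inbound_channel.accept_inbound_channel();
        /// // Queue `accept_channel_msg` for delivery back to the counterparty.
        /// ```
        ///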
7673         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7674         pub fn accept_inbound_channel(&mut self) -> msgs::AcceptChannel {
7675                 if self.context.is_outbound() {
7676                         panic!("Tried to send accept_channel for an outbound channel?");
7677                 }
7678                 if !matches!(
7679                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7680                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7681                 ) {
7682                         panic!("Tried to send accept_channel after channel had moved forward");
7683                 }
7684                 if self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7685                         panic!("Tried to send an accept_channel for a channel that has already advanced");
7686                 }
7687
7688                 self.generate_accept_channel_message()
7689         }
7690
7691         /// This function is used to explicitly generate a [`msgs::AcceptChannel`] message for an
7692         /// inbound channel. If the intention is to accept an inbound channel, use
7693         /// [`InboundV1Channel::accept_inbound_channel`] instead.
7694         ///
7695         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7696         fn generate_accept_channel_message(&self) -> msgs::AcceptChannel {
7697                 let first_per_commitment_point = self.context.holder_signer.as_ref().get_per_commitment_point(self.context.cur_holder_commitment_transaction_number, &self.context.secp_ctx);
7698                 let keys = self.context.get_holder_pubkeys();
7699
7700                 msgs::AcceptChannel {
7701                         common_fields: msgs::CommonAcceptChannelFields {
7702                                 temporary_channel_id: self.context.channel_id,
7703                                 dust_limit_satoshis: self.context.holder_dust_limit_satoshis,
7704                                 max_htlc_value_in_flight_msat: self.context.holder_max_htlc_value_in_flight_msat,
7705                                 htlc_minimum_msat: self.context.holder_htlc_minimum_msat,
7706                                 minimum_depth: self.context.minimum_depth.unwrap(),
7707                                 to_self_delay: self.context.get_holder_selected_contest_delay(),
7708                                 max_accepted_htlcs: self.context.holder_max_accepted_htlcs,
7709                                 funding_pubkey: keys.funding_pubkey,
7710                                 revocation_basepoint: keys.revocation_basepoint.to_public_key(),
7711                                 payment_basepoint: keys.payment_point,
7712                                 delayed_payment_basepoint: keys.delayed_payment_basepoint.to_public_key(),
7713                                 htlc_basepoint: keys.htlc_basepoint.to_public_key(),
7714                                 first_per_commitment_point,
7715                                 shutdown_scriptpubkey: Some(match &self.context.shutdown_scriptpubkey {
7716                                         Some(script) => script.clone().into_inner(),
7717                                         None => Builder::new().into_script(),
7718                                 }),
7719                                 channel_type: Some(self.context.channel_type.clone()),
7720                         },
7721                         channel_reserve_satoshis: self.context.holder_selected_channel_reserve_satoshis,
7722                         #[cfg(taproot)]
7723                         next_local_nonce: None,
7724                 }
7725         }
7726
7727         /// Enables the possibility for tests to extract a [`msgs::AcceptChannel`] message for an
7728         /// inbound channel without accepting it.
7729         ///
7730         /// [`msgs::AcceptChannel`]: crate::ln::msgs::AcceptChannel
7731         #[cfg(test)]
7732         pub fn get_accept_channel_message(&self) -> msgs::AcceptChannel {
7733                 self.generate_accept_channel_message()
7734         }
7735
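        /// Verifies the counterparty's `funding_created` signature against our initial holder
        /// commitment transaction, returning that transaction on success.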
7736         fn check_funding_created_signature<L: Deref>(&mut self, sig: &Signature, logger: &L) -> Result<CommitmentTransaction, ChannelError> where L::Target: Logger {
7737                 let funding_script = self.context.get_funding_redeemscript();
7738
7739                 let keys = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number);
7740                 let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &keys, true, false, logger).tx;
7741                 let trusted_tx = initial_commitment_tx.trust();
7742                 let initial_commitment_bitcoin_tx = trusted_tx.built_transaction();
7743                 let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis);
7744                 // They sign the holder commitment transaction...
7745                 log_trace!(logger, "Checking funding_created tx signature {} by key {} against tx {} (sighash {}) with redeemscript {} for channel {}.",
7746                         log_bytes!(sig.serialize_compact()[..]), log_bytes!(self.context.counterparty_funding_pubkey().serialize()),
7747                         encode::serialize_hex(&initial_commitment_bitcoin_tx.transaction), log_bytes!(sighash[..]),
7748                         encode::serialize_hex(&funding_script), &self.context.channel_id());
7749                 secp_check!(self.context.secp_ctx.verify_ecdsa(&sighash, &sig, self.context.counterparty_funding_pubkey()), "Invalid funding_created signature from peer".to_owned());
7750
7751                 Ok(initial_commitment_tx)
7752         }
7753
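        /// Handles a funding_created message from the remote end, building our initial
        /// `ChannelMonitor`. The returned `funding_signed` reply is `None` if our signer has not
        /// yet produced a signature for the initial commitment transaction.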
7754         pub fn funding_created<L: Deref>(
7755                 mut self, msg: &msgs::FundingCreated, best_block: BestBlock, signer_provider: &SP, logger: &L
7756         ) -> Result<(Channel<SP>, Option<msgs::FundingSigned>, ChannelMonitor<<SP::Target as SignerProvider>::EcdsaSigner>), (Self, ChannelError)>
7757         where
7758                 L::Target: Logger
7759         {
7760                 if self.context.is_outbound() {
7761                         return Err((self, ChannelError::Close("Received funding_created for an outbound channel?".to_owned())));
7762                 }
7763                 if !matches!(
7764                         self.context.channel_state, ChannelState::NegotiatingFunding(flags)
7765                         if flags == (NegotiatingFundingFlags::OUR_INIT_SENT | NegotiatingFundingFlags::THEIR_INIT_SENT)
7766                 ) {
7767                         // BOLT 2 says that if we disconnect before we send funding_signed we SHOULD NOT
7768                         // remember the channel, so it's safe to just send an error_message here and drop the
7769                         // channel.
7770                         return Err((self, ChannelError::Close("Received funding_created after we got the channel!".to_owned())));
7771                 }
7772                 if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) ||
7773                                 self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER ||
7774                                 self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER {
7775                         panic!("Should not have advanced channel commitment tx numbers prior to funding_created");
7776                 }
7777
7778                 let funding_txo = OutPoint { txid: msg.funding_txid, index: msg.funding_output_index };
7779                 self.context.channel_transaction_parameters.funding_outpoint = Some(funding_txo);
7780                 // This is an externally observable change before we finish all our checks.  In particular
7781                 // check_funding_created_signature may fail.
7782                 self.context.holder_signer.as_mut().provide_channel_parameters(&self.context.channel_transaction_parameters);
7783
7784                 let initial_commitment_tx = match self.check_funding_created_signature(&msg.signature, logger) {
7785                         Ok(res) => res,
7786                         Err(ChannelError::Close(e)) => {
7787                                 self.context.channel_transaction_parameters.funding_outpoint = None;
7788                                 return Err((self, ChannelError::Close(e)));
7789                         },
7790                         Err(e) => {
7791                                 // The only error we know how to handle is ChannelError::Close, so we fall over here
7792                                 // to make sure we don't continue with an inconsistent state.
7793                                 panic!("unexpected error type from check_funding_created_signature {:?}", e);
7794                         }
7795                 };
7796
7797                 let holder_commitment_tx = HolderCommitmentTransaction::new(
7798                         initial_commitment_tx,
7799                         msg.signature,
7800                         Vec::new(),
7801                         &self.context.get_holder_pubkeys().funding_pubkey,
7802                         self.context.counterparty_funding_pubkey()
7803                 );
7804
7805                 if let Err(_) = self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) {
7806                         return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned())));
7807                 }
7808
7809                 // Now that we're past error-generating stuff, update our local state:
7810
7811                 self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new());
7812                 self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo);
7813                 self.context.cur_counterparty_commitment_transaction_number -= 1;
7814                 self.context.cur_holder_commitment_transaction_number -= 1;
7815
7816                 let (counterparty_initial_commitment_tx, funding_signed) = self.context.get_funding_signed_msg(logger);
7817
7818                 let funding_redeemscript = self.context.get_funding_redeemscript();
7819                 let funding_txo_script = funding_redeemscript.to_v0_p2wsh();
7820                 let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound());
7821                 let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner());
7822                 let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id);
7823                 monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters);
7824                 let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer,
7825                                                           shutdown_script, self.context.get_holder_selected_contest_delay(),
7826                                                           &self.context.destination_script, (funding_txo, funding_txo_script.clone()),
7827                                                           &self.context.channel_transaction_parameters,
7828                                                           funding_redeemscript.clone(), self.context.channel_value_satoshis,
7829                                                           obscure_factor,
7830                                                           holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id());
7831                 channel_monitor.provide_initial_counterparty_commitment_tx(
7832                         counterparty_initial_commitment_tx.trust().txid(), Vec::new(),
7833                         self.context.cur_counterparty_commitment_transaction_number + 1,
7834                         self.context.counterparty_cur_commitment_point.unwrap(), self.context.feerate_per_kw,
7835                         counterparty_initial_commitment_tx.to_broadcaster_value_sat(),
7836                         counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger);
7837
7838                 log_info!(logger, "{} funding_signed for peer for channel {}",
7839                         if funding_signed.is_some() { "Generated" } else { "Waiting for signature on" }, &self.context.channel_id());
7840
7841                 // Promote the channel to a full-fledged one now that we have updated the state and have a
7842                 // `ChannelMonitor`.
7843                 let mut channel = Channel {
7844                         context: self.context,
7845                 };
7846                 let need_channel_ready = channel.check_get_channel_ready(0).is_some();
7847                 channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
7848
7849                 Ok((channel, funding_signed, channel_monitor))
7850         }
7851 }
7852
7853 const SERIALIZATION_VERSION: u8 = 3;
7854 const MIN_SERIALIZATION_VERSION: u8 = 3;
7855
7856 impl_writeable_tlv_based_enum!(InboundHTLCRemovalReason,;
7857         (0, FailRelay),
7858         (1, FailMalformed),
7859         (2, Fulfill),
7860 );
7861
7862 impl Writeable for ChannelUpdateStatus {
7863         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7864                 // We only care about writing out the current state as it was announced, i.e. only either
7865                 // Enabled or Disabled. In the case of DisabledStaged, we most recently announced the
7866                 // channel as enabled, so we write 0. For EnabledStaged, we similarly write a 1.
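                // As a consequence, serialization round-trips DisabledStaged back to Enabled and
                // EnabledStaged back to Disabled.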
7867                 match self {
7868                         ChannelUpdateStatus::Enabled => 0u8.write(writer)?,
7869                         ChannelUpdateStatus::DisabledStaged(_) => 0u8.write(writer)?,
7870                         ChannelUpdateStatus::EnabledStaged(_) => 1u8.write(writer)?,
7871                         ChannelUpdateStatus::Disabled => 1u8.write(writer)?,
7872                 }
7873                 Ok(())
7874         }
7875 }
7876
7877 impl Readable for ChannelUpdateStatus {
7878         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7879                 Ok(match <u8 as Readable>::read(reader)? {
7880                         0 => ChannelUpdateStatus::Enabled,
7881                         1 => ChannelUpdateStatus::Disabled,
7882                         _ => return Err(DecodeError::InvalidValue),
7883                 })
7884         }
7885 }
7886
7887 impl Writeable for AnnouncementSigsState {
7888         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7889                 // We only care about writing out the current state as if we had just disconnected, at
7890                 // which point we always set anything but PeerReceived to NotSent.
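                // As a consequence, MessageSent and Committed both deserialize as NotSent, so we
                // re-send announcement signatures after a reload, mirroring reconnection handling.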
7891                 match self {
7892                         AnnouncementSigsState::NotSent => 0u8.write(writer),
7893                         AnnouncementSigsState::MessageSent => 0u8.write(writer),
7894                         AnnouncementSigsState::Committed => 0u8.write(writer),
7895                         AnnouncementSigsState::PeerReceived => 1u8.write(writer),
7896                 }
7897         }
7898 }
7899
7900 impl Readable for AnnouncementSigsState {
7901         fn read<R: io::Read>(reader: &mut R) -> Result<Self, DecodeError> {
7902                 Ok(match <u8 as Readable>::read(reader)? {
7903                         0 => AnnouncementSigsState::NotSent,
7904                         1 => AnnouncementSigsState::PeerReceived,
7905                         _ => return Err(DecodeError::InvalidValue),
7906                 })
7907         }
7908 }
7909
7910 impl<SP: Deref> Writeable for Channel<SP> where SP::Target: SignerProvider {
7911         fn write<W: Writer>(&self, writer: &mut W) -> Result<(), io::Error> {
7912                 // Note that we write out as if remove_uncommitted_htlcs_and_mark_paused had just been
7913                 // called.
7914
7915                 write_ver_prefix!(writer, MIN_SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
7916
7917                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
7918                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We write
7919                 // the low bytes now and the optional high bytes later.
7920                 let user_id_low = self.context.user_id as u64;
7921                 user_id_low.write(writer)?;
7922
7923                 // Version 1 deserializers expected to read parts of the config object here. Version 2
7924                 // deserializers (0.0.99) now read config through TLVs, and as we now require them for
7925                 // `minimum_depth` we simply write dummy values here.
7926                 writer.write_all(&[0; 8])?;
7927
7928                 self.context.channel_id.write(writer)?;
7929                 {
7930                         let mut channel_state = self.context.channel_state;
7931                         if matches!(channel_state, ChannelState::AwaitingChannelReady(_)|ChannelState::ChannelReady(_)) {
7932                                 channel_state.set_peer_disconnected();
7933                         } else {
7934                                 debug_assert!(false, "Pre-funded/shutdown channels should not be written");
7935                         }
7936                         channel_state.to_u32().write(writer)?;
7937                 }
7938                 self.context.channel_value_satoshis.write(writer)?;
7939
7940                 self.context.latest_monitor_update_id.write(writer)?;
7941
7942                 // Write out the old serialization for shutdown_pubkey for backwards compatibility, if
7943                 // deserialized from that format.
7944                 match self.context.shutdown_scriptpubkey.as_ref().and_then(|script| script.as_legacy_pubkey()) {
7945                         Some(shutdown_pubkey) => shutdown_pubkey.write(writer)?,
7946                         None => [0u8; PUBLIC_KEY_SIZE].write(writer)?,
7947                 }
7948                 self.context.destination_script.write(writer)?;
7949
7950                 self.context.cur_holder_commitment_transaction_number.write(writer)?;
7951                 self.context.cur_counterparty_commitment_transaction_number.write(writer)?;
7952                 self.context.value_to_self_msat.write(writer)?;
7953
7954                 let mut dropped_inbound_htlcs = 0;
7955                 for htlc in self.context.pending_inbound_htlcs.iter() {
7956                         if let InboundHTLCState::RemoteAnnounced(_) = htlc.state {
7957                                 dropped_inbound_htlcs += 1;
7958                         }
7959                 }
7960                 (self.context.pending_inbound_htlcs.len() as u64 - dropped_inbound_htlcs).write(writer)?;
7961                 for htlc in self.context.pending_inbound_htlcs.iter() {
7962                         if let &InboundHTLCState::RemoteAnnounced(_) = &htlc.state {
7963                                 continue; // Drop
7964                         }
7965                         htlc.htlc_id.write(writer)?;
7966                         htlc.amount_msat.write(writer)?;
7967                         htlc.cltv_expiry.write(writer)?;
7968                         htlc.payment_hash.write(writer)?;
7969                         match &htlc.state {
7970                                 &InboundHTLCState::RemoteAnnounced(_) => unreachable!(),
7971                                 &InboundHTLCState::AwaitingRemoteRevokeToAnnounce(ref htlc_state) => {
7972                                         1u8.write(writer)?;
7973                                         htlc_state.write(writer)?;
7974                                 },
7975                                 &InboundHTLCState::AwaitingAnnouncedRemoteRevoke(ref htlc_state) => {
7976                                         2u8.write(writer)?;
7977                                         htlc_state.write(writer)?;
7978                                 },
7979                                 &InboundHTLCState::Committed => {
7980                                         3u8.write(writer)?;
7981                                 },
7982                                 &InboundHTLCState::LocalRemoved(ref removal_reason) => {
7983                                         4u8.write(writer)?;
7984                                         removal_reason.write(writer)?;
7985                                 },
7986                         }
7987                 }
7988
7989                 let mut preimages: Vec<&Option<PaymentPreimage>> = vec![];
7990                 let mut pending_outbound_skimmed_fees: Vec<Option<u64>> = Vec::new();
7991                 let mut pending_outbound_blinding_points: Vec<Option<PublicKey>> = Vec::new();
7992
7993                 (self.context.pending_outbound_htlcs.len() as u64).write(writer)?;
7994                 for htlc in self.context.pending_outbound_htlcs.iter() {
7995                         htlc.htlc_id.write(writer)?;
7996                         htlc.amount_msat.write(writer)?;
7997                         htlc.cltv_expiry.write(writer)?;
7998                         htlc.payment_hash.write(writer)?;
7999                         htlc.source.write(writer)?;
8000                         match &htlc.state {
8001                                 &OutboundHTLCState::LocalAnnounced(ref onion_packet) => {
8002                                         0u8.write(writer)?;
8003                                         onion_packet.write(writer)?;
8004                                 },
8005                                 &OutboundHTLCState::Committed => {
8006                                         1u8.write(writer)?;
8007                                 },
8008                                 &OutboundHTLCState::RemoteRemoved(_) => {
8009                                         // Treat this as a Committed because we haven't received the CS - they'll
8010                                         // resend the claim/fail on reconnect, as well as (hopefully) the missing CS.
8011                                         1u8.write(writer)?;
8012                                 },
8013                                 &OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref outcome) => {
8014                                         3u8.write(writer)?;
8015                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
8016                                                 preimages.push(preimage);
8017                                         }
8018                                         let reason: Option<&HTLCFailReason> = outcome.into();
8019                                         reason.write(writer)?;
8020                                 }
8021                                 &OutboundHTLCState::AwaitingRemovedRemoteRevoke(ref outcome) => {
8022                                         4u8.write(writer)?;
8023                                         if let OutboundHTLCOutcome::Success(preimage) = outcome {
8024                                                 preimages.push(preimage);
8025                                         }
8026                                         let reason: Option<&HTLCFailReason> = outcome.into();
8027                                         reason.write(writer)?;
8028                                 }
8029                         }
8030                         pending_outbound_skimmed_fees.push(htlc.skimmed_fee_msat);
8031                         pending_outbound_blinding_points.push(htlc.blinding_point);
8032                 }
8033
8034                 let mut holding_cell_skimmed_fees: Vec<Option<u64>> = Vec::new();
8035                 let mut holding_cell_blinding_points: Vec<Option<PublicKey>> = Vec::new();
8036                 // Vec of (htlc_id, failure_code, sha256_of_onion)
8037                 let mut malformed_htlcs: Vec<(u64, u16, [u8; 32])> = Vec::new();
8038                 (self.context.holding_cell_htlc_updates.len() as u64).write(writer)?;
8039                 for update in self.context.holding_cell_htlc_updates.iter() {
8040                         match update {
8041                                 &HTLCUpdateAwaitingACK::AddHTLC {
8042                                         ref amount_msat, ref cltv_expiry, ref payment_hash, ref source, ref onion_routing_packet,
8043                                         blinding_point, skimmed_fee_msat,
8044                                 } => {
8045                                         0u8.write(writer)?;
8046                                         amount_msat.write(writer)?;
8047                                         cltv_expiry.write(writer)?;
8048                                         payment_hash.write(writer)?;
8049                                         source.write(writer)?;
8050                                         onion_routing_packet.write(writer)?;
8051
8052                                         holding_cell_skimmed_fees.push(skimmed_fee_msat);
8053                                         holding_cell_blinding_points.push(blinding_point);
8054                                 },
8055                                 &HTLCUpdateAwaitingACK::ClaimHTLC { ref payment_preimage, ref htlc_id } => {
8056                                         1u8.write(writer)?;
8057                                         payment_preimage.write(writer)?;
8058                                         htlc_id.write(writer)?;
8059                                 },
8060                                 &HTLCUpdateAwaitingACK::FailHTLC { ref htlc_id, ref err_packet } => {
8061                                         2u8.write(writer)?;
8062                                         htlc_id.write(writer)?;
8063                                         err_packet.write(writer)?;
8064                                 }
8065                                 &HTLCUpdateAwaitingACK::FailMalformedHTLC {
8066                                         htlc_id, failure_code, sha256_of_onion
8067                                 } => {
8068                                         // We don't want to break downgrading by adding a new variant, so write a dummy
8069                                         // `::FailHTLC` variant and write the real malformed error as an optional TLV.
8070                                         malformed_htlcs.push((htlc_id, failure_code, sha256_of_onion));
8071
8072                                         let dummy_err_packet = msgs::OnionErrorPacket { data: Vec::new() };
8073                                         2u8.write(writer)?;
8074                                         htlc_id.write(writer)?;
8075                                         dummy_err_packet.write(writer)?;
8076                                 }
8077                         }
8078                 }
8079
8080                 match self.context.resend_order {
8081                         RAACommitmentOrder::CommitmentFirst => 0u8.write(writer)?,
8082                         RAACommitmentOrder::RevokeAndACKFirst => 1u8.write(writer)?,
8083                 }
8084
8085                 self.context.monitor_pending_channel_ready.write(writer)?;
8086                 self.context.monitor_pending_revoke_and_ack.write(writer)?;
8087                 self.context.monitor_pending_commitment_signed.write(writer)?;
8088
8089                 (self.context.monitor_pending_forwards.len() as u64).write(writer)?;
8090                 for &(ref pending_forward, ref htlc_id) in self.context.monitor_pending_forwards.iter() {
8091                         pending_forward.write(writer)?;
8092                         htlc_id.write(writer)?;
8093                 }
8094
8095                 (self.context.monitor_pending_failures.len() as u64).write(writer)?;
8096                 for &(ref htlc_source, ref payment_hash, ref fail_reason) in self.context.monitor_pending_failures.iter() {
8097                         htlc_source.write(writer)?;
8098                         payment_hash.write(writer)?;
8099                         fail_reason.write(writer)?;
8100                 }
8101
8102                 if self.context.is_outbound() {
8103                         self.context.pending_update_fee.map(|(a, _)| a).write(writer)?;
8104                 } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.context.pending_update_fee {
8105                         Some(feerate).write(writer)?;
8106                 } else {
8107                         // As for inbound HTLCs, if the update was only announced and never committed in a
8108                         // commitment_signed, drop it.
8109                         None::<u32>.write(writer)?;
8110                 }
8111                 self.context.holding_cell_update_fee.write(writer)?;
8112
8113                 self.context.next_holder_htlc_id.write(writer)?;
8114                 (self.context.next_counterparty_htlc_id - dropped_inbound_htlcs).write(writer)?;
8115                 self.context.update_time_counter.write(writer)?;
8116                 self.context.feerate_per_kw.write(writer)?;
8117
8118                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
8119                 // however we are supposed to restart shutdown fee negotiation on reconnect (and wipe
8120                 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
8121                 // consider the stale state on reload.
8122                 0u8.write(writer)?;
8123
8124                 self.context.funding_tx_confirmed_in.write(writer)?;
8125                 self.context.funding_tx_confirmation_height.write(writer)?;
8126                 self.context.short_channel_id.write(writer)?;
8127
8128                 self.context.counterparty_dust_limit_satoshis.write(writer)?;
8129                 self.context.holder_dust_limit_satoshis.write(writer)?;
8130                 self.context.counterparty_max_htlc_value_in_flight_msat.write(writer)?;
8131
8132                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8133                 self.context.counterparty_selected_channel_reserve_satoshis.unwrap_or(0).write(writer)?;
8134
8135                 self.context.counterparty_htlc_minimum_msat.write(writer)?;
8136                 self.context.holder_htlc_minimum_msat.write(writer)?;
8137                 self.context.counterparty_max_accepted_htlcs.write(writer)?;
8138
8139                 // Note that this field is ignored by 0.0.99+ as the TLV Optional variant is used instead.
8140                 self.context.minimum_depth.unwrap_or(0).write(writer)?;
8141
8142                 match &self.context.counterparty_forwarding_info {
8143                         Some(info) => {
8144                                 1u8.write(writer)?;
8145                                 info.fee_base_msat.write(writer)?;
8146                                 info.fee_proportional_millionths.write(writer)?;
8147                                 info.cltv_expiry_delta.write(writer)?;
8148                         },
8149                         None => 0u8.write(writer)?
8150                 }
8151
8152                 self.context.channel_transaction_parameters.write(writer)?;
8153                 self.context.funding_transaction.write(writer)?;
8154
8155                 self.context.counterparty_cur_commitment_point.write(writer)?;
8156                 self.context.counterparty_prev_commitment_point.write(writer)?;
8157                 self.context.counterparty_node_id.write(writer)?;
8158
8159                 self.context.counterparty_shutdown_scriptpubkey.write(writer)?;
8160
8161                 self.context.commitment_secrets.write(writer)?;
8162
8163                 self.context.channel_update_status.write(writer)?;
8164
8165                 #[cfg(any(test, fuzzing))]
8166                 (self.context.historical_inbound_htlc_fulfills.len() as u64).write(writer)?;
8167                 #[cfg(any(test, fuzzing))]
8168                 for htlc in self.context.historical_inbound_htlc_fulfills.iter() {
8169                         htlc.write(writer)?;
8170                 }
8171
8172                 // If the channel type is something other than only-static-remote-key, then we need to have
8173                 // older clients fail to deserialize this channel at all. If the type is
8174                 // only-static-remote-key, we simply consider it "default" and don't write the channel type
8175                 // out at all.
8176                 let chan_type = if self.context.channel_type != ChannelTypeFeatures::only_static_remote_key() {
8177                         Some(&self.context.channel_type) } else { None };
8178
8179                 // The same logic applies for `holder_selected_channel_reserve_satoshis` values other than
8180                 // the default, and when `holder_max_htlc_value_in_flight_msat` is configured to a
8181                 // different percentage of the channel value than 10%, which older versions of LDK used
8182                 // to set it to before the percentage was made configurable.
8183                 let serialized_holder_selected_reserve =
8184                         if self.context.holder_selected_channel_reserve_satoshis != get_legacy_default_holder_selected_channel_reserve_satoshis(self.context.channel_value_satoshis)
8185                         { Some(self.context.holder_selected_channel_reserve_satoshis) } else { None };
8186
8187                 let mut old_max_in_flight_percent_config = UserConfig::default().channel_handshake_config;
8188                 old_max_in_flight_percent_config.max_inbound_htlc_value_in_flight_percent_of_channel = MAX_IN_FLIGHT_PERCENT_LEGACY;
8189                 let serialized_holder_htlc_max_in_flight =
8190                         if self.context.holder_max_htlc_value_in_flight_msat != get_holder_max_htlc_value_in_flight_msat(self.context.channel_value_satoshis, &old_max_in_flight_percent_config)
8191                         { Some(self.context.holder_max_htlc_value_in_flight_msat) } else { None };
8192
8193                 let channel_pending_event_emitted = Some(self.context.channel_pending_event_emitted);
8194                 let channel_ready_event_emitted = Some(self.context.channel_ready_event_emitted);
8195
8196                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8197                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. Therefore,
8198                 // we write the high bytes as an option here.
8199                 let user_id_high_opt = Some((self.context.user_id >> 64) as u64);
8200
8201                 let holder_max_accepted_htlcs = if self.context.holder_max_accepted_htlcs == DEFAULT_MAX_HTLCS { None } else { Some(self.context.holder_max_accepted_htlcs) };
8202
8203                 write_tlv_fields!(writer, {
8204                         (0, self.context.announcement_sigs, option),
8205                         // minimum_depth and counterparty_selected_channel_reserve_satoshis used to have a
8206                         // default value instead of being Option<>al. Thus, to maintain compatibility we write
8207                         // them twice, once with their original default values above, and once as an option
8208                         // here. On the read side, old versions will simply ignore the odd-type entries here,
8209                         // and new versions map the default values to None and allow the TLV entries here to
8210                         // override that.
8211                         (1, self.context.minimum_depth, option),
8212                         (2, chan_type, option),
8213                         (3, self.context.counterparty_selected_channel_reserve_satoshis, option),
8214                         (4, serialized_holder_selected_reserve, option),
8215                         (5, self.context.config, required),
8216                         (6, serialized_holder_htlc_max_in_flight, option),
8217                         (7, self.context.shutdown_scriptpubkey, option),
8218                         (8, self.context.blocked_monitor_updates, optional_vec),
8219                         (9, self.context.target_closing_feerate_sats_per_kw, option),
8220                         (11, self.context.monitor_pending_finalized_fulfills, required_vec),
8221                         (13, self.context.channel_creation_height, required),
8222                         (15, preimages, required_vec),
8223                         (17, self.context.announcement_sigs_state, required),
8224                         (19, self.context.latest_inbound_scid_alias, option),
8225                         (21, self.context.outbound_scid_alias, required),
8226                         (23, channel_ready_event_emitted, option),
8227                         (25, user_id_high_opt, option),
8228                         (27, self.context.channel_keys_id, required),
8229                         (28, holder_max_accepted_htlcs, option),
8230                         (29, self.context.temporary_channel_id, option),
8231                         (31, channel_pending_event_emitted, option),
8232                         (35, pending_outbound_skimmed_fees, optional_vec),
8233                         (37, holding_cell_skimmed_fees, optional_vec),
8234                         (38, self.context.is_batch_funding, option),
8235                         (39, pending_outbound_blinding_points, optional_vec),
8236                         (41, holding_cell_blinding_points, optional_vec),
8237                         (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8238                         (45, self.context.local_initiated_shutdown, option), // Added in 0.0.122
8239                 });
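                // A note on the TLV type numbers above, following the usual even/odd rule
                // (a sketch of the convention, not new behavior): even types (e.g. 2 for
                // `chan_type`, 28 for `holder_max_accepted_htlcs`) are "required", so an
                // older reader that doesn't understand them will fail to deserialize the
                // channel, which is why they're only written when they carry a non-default
                // value. Odd types (e.g. 29 for `temporary_channel_id`) are silently
                // ignored by readers that don't know them.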
8240
8241                 Ok(())
8242         }
8243 }
8244
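// An upper bound on any single up-front allocation while deserializing: length
// prefixes read from the stream are clamped to this so a corrupted or malicious
// value can't trigger a huge allocation before we notice the data is bogus.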
8245 const MAX_ALLOC_SIZE: usize = 64*1024;
8246 impl<'a, 'b, 'c, ES: Deref, SP: Deref> ReadableArgs<(&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)> for Channel<SP>
8247                 where
8248                         ES::Target: EntropySource,
8249                         SP::Target: SignerProvider
8250 {
8251         fn read<R : io::Read>(reader: &mut R, args: (&'a ES, &'b SP, u32, &'c ChannelTypeFeatures)) -> Result<Self, DecodeError> {
8252                 let (entropy_source, signer_provider, serialized_height, our_supported_features) = args;
8253                 let ver = read_ver_prefix!(reader, SERIALIZATION_VERSION);
8254
8255                 // `user_id` used to be a single u64 value. In order to remain backwards compatible with
8256                 // versions prior to 0.0.113, the u128 is serialized as two separate u64 values. We read
8257                 // the low bytes now and the high bytes later.
8258                 let user_id_low: u64 = Readable::read(reader)?;
8259
8260                 let mut config = Some(LegacyChannelConfig::default());
8261                 if ver == 1 {
8262                         // Read the old serialization of the ChannelConfig from version 0.0.98.
8263                         config.as_mut().unwrap().options.forwarding_fee_proportional_millionths = Readable::read(reader)?;
8264                         config.as_mut().unwrap().options.cltv_expiry_delta = Readable::read(reader)?;
8265                         config.as_mut().unwrap().announced_channel = Readable::read(reader)?;
8266                         config.as_mut().unwrap().commit_upfront_shutdown_pubkey = Readable::read(reader)?;
8267                 } else {
8268                         // Read the 8 bytes of backwards-compatibility ChannelConfig data.
8269                         let mut _val: u64 = Readable::read(reader)?;
8270                 }
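                // (The same pattern repeats below for `counterparty_selected_channel_reserve_satoshis`
                // and `minimum_depth`: fields which moved into the TLV stream in 0.0.99+ still occupy
                // their old fixed-size slots, keeping the legacy layout aligned, while the
                // authoritative values are read from the TLV entries at the end.)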
8271
8272                 let channel_id = Readable::read(reader)?;
8273                 let channel_state = ChannelState::from_u32(Readable::read(reader)?).map_err(|_| DecodeError::InvalidValue)?;
8274                 let channel_value_satoshis = Readable::read(reader)?;
8275
8276                 let latest_monitor_update_id = Readable::read(reader)?;
8277
8278                 let mut keys_data = None;
8279                 if ver <= 2 {
8280                         // Read the serialized signer bytes. We'll choose to deserialize them or not based on whether
8281                         // the `channel_keys_id` TLV is present below.
8282                         let keys_len: u32 = Readable::read(reader)?;
8283                         keys_data = Some(Vec::with_capacity(cmp::min(keys_len as usize, MAX_ALLOC_SIZE)));
8284                         while keys_data.as_ref().unwrap().len() != keys_len as usize {
8285                                 // Read 1KB at a time to avoid accidentally allocating 4GB on corrupted channel keys
8286                                 let mut data = [0; 1024];
8287                                 let read_slice = &mut data[0..cmp::min(1024, keys_len as usize - keys_data.as_ref().unwrap().len())];
8288                                 reader.read_exact(read_slice)?;
8289                                 keys_data.as_mut().unwrap().extend_from_slice(read_slice);
8290                         }
8291                 }
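                // The loop above is an instance of a bounded-read pattern; as a standalone
                // sketch (a hypothetical helper, not part of LDK's API), it looks like:
                //
                //   fn read_bounded<R: io::Read>(r: &mut R, len: usize) -> io::Result<Vec<u8>> {
                //           let mut out = Vec::with_capacity(cmp::min(len, MAX_ALLOC_SIZE));
                //           let mut buf = [0u8; 1024];
                //           while out.len() != len {
                //                   let chunk = cmp::min(1024, len - out.len());
                //                   r.read_exact(&mut buf[..chunk])?;
                //                   out.extend_from_slice(&buf[..chunk]);
                //           }
                //           Ok(out)
                //   }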
8292
8293                 // Read the old serialization for shutdown_pubkey, preferring the TLV field later if set.
8294                 let mut shutdown_scriptpubkey = match <PublicKey as Readable>::read(reader) {
8295                         Ok(pubkey) => Some(ShutdownScript::new_p2wpkh_from_pubkey(pubkey)),
8296                         Err(_) => None,
8297                 };
8298                 let destination_script = Readable::read(reader)?;
8299
8300                 let cur_holder_commitment_transaction_number = Readable::read(reader)?;
8301                 let cur_counterparty_commitment_transaction_number = Readable::read(reader)?;
8302                 let value_to_self_msat = Readable::read(reader)?;
8303
8304                 let pending_inbound_htlc_count: u64 = Readable::read(reader)?;
8305
8306                 let mut pending_inbound_htlcs = Vec::with_capacity(cmp::min(pending_inbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8307                 for _ in 0..pending_inbound_htlc_count {
8308                         pending_inbound_htlcs.push(InboundHTLCOutput {
8309                                 htlc_id: Readable::read(reader)?,
8310                                 amount_msat: Readable::read(reader)?,
8311                                 cltv_expiry: Readable::read(reader)?,
8312                                 payment_hash: Readable::read(reader)?,
8313                                 state: match <u8 as Readable>::read(reader)? {
8314                                         1 => InboundHTLCState::AwaitingRemoteRevokeToAnnounce(Readable::read(reader)?),
8315                                         2 => InboundHTLCState::AwaitingAnnouncedRemoteRevoke(Readable::read(reader)?),
8316                                         3 => InboundHTLCState::Committed,
8317                                         4 => InboundHTLCState::LocalRemoved(Readable::read(reader)?),
8318                                         _ => return Err(DecodeError::InvalidValue),
8319                                 },
8320                         });
8321                 }
8322
8323                 let pending_outbound_htlc_count: u64 = Readable::read(reader)?;
8324                 let mut pending_outbound_htlcs = Vec::with_capacity(cmp::min(pending_outbound_htlc_count as usize, DEFAULT_MAX_HTLCS as usize));
8325                 for _ in 0..pending_outbound_htlc_count {
8326                         pending_outbound_htlcs.push(OutboundHTLCOutput {
8327                                 htlc_id: Readable::read(reader)?,
8328                                 amount_msat: Readable::read(reader)?,
8329                                 cltv_expiry: Readable::read(reader)?,
8330                                 payment_hash: Readable::read(reader)?,
8331                                 source: Readable::read(reader)?,
8332                                 state: match <u8 as Readable>::read(reader)? {
8333                                         0 => OutboundHTLCState::LocalAnnounced(Box::new(Readable::read(reader)?)),
8334                                         1 => OutboundHTLCState::Committed,
8335                                         2 => {
8336                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8337                                                 OutboundHTLCState::RemoteRemoved(option.into())
8338                                         },
8339                                         3 => {
8340                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8341                                                 OutboundHTLCState::AwaitingRemoteRevokeToRemove(option.into())
8342                                         },
8343                                         4 => {
8344                                                 let option: Option<HTLCFailReason> = Readable::read(reader)?;
8345                                                 OutboundHTLCState::AwaitingRemovedRemoteRevoke(option.into())
8346                                         },
8347                                         _ => return Err(DecodeError::InvalidValue),
8348                                 },
8349                                 skimmed_fee_msat: None,
8350                                 blinding_point: None,
8351                         });
8352                 }
8353
8354                 let holding_cell_htlc_update_count: u64 = Readable::read(reader)?;
8355                 let mut holding_cell_htlc_updates = Vec::with_capacity(cmp::min(holding_cell_htlc_update_count as usize, DEFAULT_MAX_HTLCS as usize*2));
8356                 for _ in 0..holding_cell_htlc_update_count {
8357                         holding_cell_htlc_updates.push(match <u8 as Readable>::read(reader)? {
8358                                 0 => HTLCUpdateAwaitingACK::AddHTLC {
8359                                         amount_msat: Readable::read(reader)?,
8360                                         cltv_expiry: Readable::read(reader)?,
8361                                         payment_hash: Readable::read(reader)?,
8362                                         source: Readable::read(reader)?,
8363                                         onion_routing_packet: Readable::read(reader)?,
8364                                         skimmed_fee_msat: None,
8365                                         blinding_point: None,
8366                                 },
8367                                 1 => HTLCUpdateAwaitingACK::ClaimHTLC {
8368                                         payment_preimage: Readable::read(reader)?,
8369                                         htlc_id: Readable::read(reader)?,
8370                                 },
8371                                 2 => HTLCUpdateAwaitingACK::FailHTLC {
8372                                         htlc_id: Readable::read(reader)?,
8373                                         err_packet: Readable::read(reader)?,
8374                                 },
8375                                 _ => return Err(DecodeError::InvalidValue),
8376                         });
8377                 }
8378
8379                 let resend_order = match <u8 as Readable>::read(reader)? {
8380                         0 => RAACommitmentOrder::CommitmentFirst,
8381                         1 => RAACommitmentOrder::RevokeAndACKFirst,
8382                         _ => return Err(DecodeError::InvalidValue),
8383                 };
8384
8385                 let monitor_pending_channel_ready = Readable::read(reader)?;
8386                 let monitor_pending_revoke_and_ack = Readable::read(reader)?;
8387                 let monitor_pending_commitment_signed = Readable::read(reader)?;
8388
8389                 let monitor_pending_forwards_count: u64 = Readable::read(reader)?;
8390                 let mut monitor_pending_forwards = Vec::with_capacity(cmp::min(monitor_pending_forwards_count as usize, DEFAULT_MAX_HTLCS as usize));
8391                 for _ in 0..monitor_pending_forwards_count {
8392                         monitor_pending_forwards.push((Readable::read(reader)?, Readable::read(reader)?));
8393                 }
8394
8395                 let monitor_pending_failures_count: u64 = Readable::read(reader)?;
8396                 let mut monitor_pending_failures = Vec::with_capacity(cmp::min(monitor_pending_failures_count as usize, DEFAULT_MAX_HTLCS as usize));
8397                 for _ in 0..monitor_pending_failures_count {
8398                         monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?));
8399                 }
8400
8401                 let pending_update_fee_value: Option<u32> = Readable::read(reader)?;
8402
8403                 let holding_cell_update_fee = Readable::read(reader)?;
8404
8405                 let next_holder_htlc_id = Readable::read(reader)?;
8406                 let next_counterparty_htlc_id = Readable::read(reader)?;
8407                 let update_time_counter = Readable::read(reader)?;
8408                 let feerate_per_kw = Readable::read(reader)?;
8409
8410                 // Versions prior to 0.0.100 expected to read the fields of `last_sent_closing_fee` here,
8411                 // however, we are supposed to restart shutdown fee negotiation on reconnect (and wipe
8412                 // `last_sent_closing_fee` in `remove_uncommitted_htlcs_and_mark_paused`) so we should never
8413                 // consider the stale state on reload.
8414                 match <u8 as Readable>::read(reader)? {
8415                         0 => {},
8416                         1 => {
8417                                 let _: u32 = Readable::read(reader)?;
8418                                 let _: u64 = Readable::read(reader)?;
8419                                 let _: Signature = Readable::read(reader)?;
8420                         },
8421                         _ => return Err(DecodeError::InvalidValue),
8422                 }
8423
8424                 let funding_tx_confirmed_in = Readable::read(reader)?;
8425                 let funding_tx_confirmation_height = Readable::read(reader)?;
8426                 let short_channel_id = Readable::read(reader)?;
8427
8428                 let counterparty_dust_limit_satoshis = Readable::read(reader)?;
8429                 let holder_dust_limit_satoshis = Readable::read(reader)?;
8430                 let counterparty_max_htlc_value_in_flight_msat = Readable::read(reader)?;
8431                 let mut counterparty_selected_channel_reserve_satoshis = None;
8432                 if ver == 1 {
8433                         // Read the old serialization from version 0.0.98.
8434                         counterparty_selected_channel_reserve_satoshis = Some(Readable::read(reader)?);
8435                 } else {
8436                         // Read the 8 bytes of backwards-compatibility data.
8437                         let _dummy: u64 = Readable::read(reader)?;
8438                 }
8439                 let counterparty_htlc_minimum_msat = Readable::read(reader)?;
8440                 let holder_htlc_minimum_msat = Readable::read(reader)?;
8441                 let counterparty_max_accepted_htlcs = Readable::read(reader)?;
8442
8443                 let mut minimum_depth = None;
8444                 if ver == 1 {
8445                         // Read the old serialization from version 0.0.98.
8446                         minimum_depth = Some(Readable::read(reader)?);
8447                 } else {
8448                         // Read the 4 bytes of backwards-compatibility data.
8449                         let _dummy: u32 = Readable::read(reader)?;
8450                 }
8451
8452                 let counterparty_forwarding_info = match <u8 as Readable>::read(reader)? {
8453                         0 => None,
8454                         1 => Some(CounterpartyForwardingInfo {
8455                                 fee_base_msat: Readable::read(reader)?,
8456                                 fee_proportional_millionths: Readable::read(reader)?,
8457                                 cltv_expiry_delta: Readable::read(reader)?,
8458                         }),
8459                         _ => return Err(DecodeError::InvalidValue),
8460                 };
8461
8462                 let mut channel_parameters: ChannelTransactionParameters = Readable::read(reader)?;
8463                 let funding_transaction: Option<Transaction> = Readable::read(reader)?;
8464
8465                 let counterparty_cur_commitment_point = Readable::read(reader)?;
8466
8467                 let counterparty_prev_commitment_point = Readable::read(reader)?;
8468                 let counterparty_node_id = Readable::read(reader)?;
8469
8470                 let counterparty_shutdown_scriptpubkey = Readable::read(reader)?;
8471                 let commitment_secrets = Readable::read(reader)?;
8472
8473                 let channel_update_status = Readable::read(reader)?;
8474
8475                 #[cfg(any(test, fuzzing))]
8476                 let mut historical_inbound_htlc_fulfills = new_hash_set();
8477                 #[cfg(any(test, fuzzing))]
8478                 {
8479                         let htlc_fulfills_len: u64 = Readable::read(reader)?;
8480                         for _ in 0..htlc_fulfills_len {
8481                                 assert!(historical_inbound_htlc_fulfills.insert(Readable::read(reader)?));
8482                         }
8483                 }
8484
8485                 let pending_update_fee = if let Some(feerate) = pending_update_fee_value {
8486                         Some((feerate, if channel_parameters.is_outbound_from_holder {
8487                                 FeeUpdateState::Outbound
8488                         } else {
8489                                 FeeUpdateState::AwaitingRemoteRevokeToAnnounce
8490                         }))
8491                 } else {
8492                         None
8493                 };
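                // (Only the channel funder may send `update_fee`, so if we're the
                // outbound/funding side a fee update pending on reload must be one we
                // initiated; otherwise it was received from the counterparty and is still
                // waiting on our revoke_and_ack.)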
8494
8495                 let mut announcement_sigs = None;
8496                 let mut target_closing_feerate_sats_per_kw = None;
8497                 let mut monitor_pending_finalized_fulfills = Some(Vec::new());
8498                 let mut holder_selected_channel_reserve_satoshis = Some(get_legacy_default_holder_selected_channel_reserve_satoshis(channel_value_satoshis));
8499                 let mut holder_max_htlc_value_in_flight_msat = Some(get_holder_max_htlc_value_in_flight_msat(channel_value_satoshis, &UserConfig::default().channel_handshake_config));
8500                 // Prior to supporting channel type negotiation, all of our channels were static_remotekey
8501                 // only, so we default to that if none was written.
8502                 let mut channel_type = Some(ChannelTypeFeatures::only_static_remote_key());
8503                 let mut channel_creation_height = Some(serialized_height);
8504                 let mut preimages_opt: Option<Vec<Option<PaymentPreimage>>> = None;
8505
8506                 // If we read an old Channel, for simplicity we just treat it as "we never sent an
8507                 // AnnouncementSignatures" which implies we'll re-send it on reconnect, but that's fine.
8508                 let mut announcement_sigs_state = Some(AnnouncementSigsState::NotSent);
8509                 let mut latest_inbound_scid_alias = None;
8510                 let mut outbound_scid_alias = None;
8511                 let mut channel_pending_event_emitted = None;
8512                 let mut channel_ready_event_emitted = None;
8513
8514                 let mut user_id_high_opt: Option<u64> = None;
8515                 let mut channel_keys_id: Option<[u8; 32]> = None;
8516                 let mut temporary_channel_id: Option<ChannelId> = None;
8517                 let mut holder_max_accepted_htlcs: Option<u16> = None;
8518
8519                 let mut blocked_monitor_updates = Some(Vec::new());
8520
8521                 let mut pending_outbound_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8522                 let mut holding_cell_skimmed_fees_opt: Option<Vec<Option<u64>>> = None;
8523
8524                 let mut is_batch_funding: Option<()> = None;
8525
8526                 let mut local_initiated_shutdown: Option<()> = None;
8527
8528                 let mut pending_outbound_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8529                 let mut holding_cell_blinding_points_opt: Option<Vec<Option<PublicKey>>> = None;
8530
8531                 let mut malformed_htlcs: Option<Vec<(u64, u16, [u8; 32])>> = None;
8532
8533                 read_tlv_fields!(reader, {
8534                         (0, announcement_sigs, option),
8535                         (1, minimum_depth, option),
8536                         (2, channel_type, option),
8537                         (3, counterparty_selected_channel_reserve_satoshis, option),
8538                         (4, holder_selected_channel_reserve_satoshis, option),
8539                         (5, config, option), // Note that if none is provided we will *not* overwrite the existing one.
8540                         (6, holder_max_htlc_value_in_flight_msat, option),
8541                         (7, shutdown_scriptpubkey, option),
8542                         (8, blocked_monitor_updates, optional_vec),
8543                         (9, target_closing_feerate_sats_per_kw, option),
8544                         (11, monitor_pending_finalized_fulfills, optional_vec),
8545                         (13, channel_creation_height, option),
8546                         (15, preimages_opt, optional_vec),
8547                         (17, announcement_sigs_state, option),
8548                         (19, latest_inbound_scid_alias, option),
8549                         (21, outbound_scid_alias, option),
8550                         (23, channel_ready_event_emitted, option),
8551                         (25, user_id_high_opt, option),
8552                         (27, channel_keys_id, option),
8553                         (28, holder_max_accepted_htlcs, option),
8554                         (29, temporary_channel_id, option),
8555                         (31, channel_pending_event_emitted, option),
8556                         (35, pending_outbound_skimmed_fees_opt, optional_vec),
8557                         (37, holding_cell_skimmed_fees_opt, optional_vec),
8558                         (38, is_batch_funding, option),
8559                         (39, pending_outbound_blinding_points_opt, optional_vec),
8560                         (41, holding_cell_blinding_points_opt, optional_vec),
8561                         (43, malformed_htlcs, optional_vec), // Added in 0.0.119
8562                         (45, local_initiated_shutdown, option),
8563                 });
8564
8565                 let (channel_keys_id, holder_signer) = if let Some(channel_keys_id) = channel_keys_id {
8566                         let mut holder_signer = signer_provider.derive_channel_signer(channel_value_satoshis, channel_keys_id);
8567                         // If we've gotten to the funding stage of the channel, populate the signer with its
8568                         // required channel parameters.
8569                         if channel_state >= ChannelState::FundingNegotiated {
8570                                 holder_signer.provide_channel_parameters(&channel_parameters);
8571                         }
8572                         (channel_keys_id, holder_signer)
8573                 } else {
8574                         // `keys_data` can be `None` if we had corrupted data.
8575                         let keys_data = keys_data.ok_or(DecodeError::InvalidValue)?;
8576                         let holder_signer = signer_provider.read_chan_signer(&keys_data)?;
8577                         (holder_signer.channel_keys_id(), holder_signer)
8578                 };
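                // Serialization versions <= 2 wrote the signer's raw bytes inline (read
                // into `keys_data` above); once `channel_keys_id` is present as TLV 27 we
                // instead re-derive the signer deterministically and the inline bytes go
                // unused.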
8579
8580                 if let Some(preimages) = preimages_opt {
8581                         let mut iter = preimages.into_iter();
8582                         for htlc in pending_outbound_htlcs.iter_mut() {
8583                                 match &htlc.state {
8584                                         OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(None)) => {
8585                                                 htlc.state = OutboundHTLCState::AwaitingRemoteRevokeToRemove(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8586                                         }
8587                                         OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(None)) => {
8588                                                 htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(OutboundHTLCOutcome::Success(iter.next().ok_or(DecodeError::InvalidValue)?));
8589                                         }
8590                                         _ => {}
8591                                 }
8592                         }
8593                         // We expect all preimages to be consumed above
8594                         if iter.next().is_some() {
8595                                 return Err(DecodeError::InvalidValue);
8596                         }
8597                 }
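                // (This re-zipping relies on the write and read sides iterating
                // `pending_outbound_htlcs` in the same order, with one TLV-15 entry per
                // HTLC in a `Success` removal state; leftover entries therefore indicate
                // corrupt data.)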
8598
8599                 let chan_features = channel_type.as_ref().unwrap();
8600                 if !chan_features.is_subset(our_supported_features) {
8601                         // If the channel was written by a new version and negotiated with features we don't
8602                         // understand yet, refuse to read it.
8603                         return Err(DecodeError::UnknownRequiredFeature);
8604                 }
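                // E.g. a channel negotiated with a channel type bit this build doesn't
                // advertise in `our_supported_features` must not be loaded: we could
                // otherwise end up constructing commitment transactions whose format we
                // don't actually understand.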
8605
8606                 // ChannelTransactionParameters may have had an empty features set upon deserialization.
8607                 // To account for that, we're proactively setting/overriding the field here.
8608                 channel_parameters.channel_type_features = chan_features.clone();
8609
8610                 let mut secp_ctx = Secp256k1::new();
8611                 secp_ctx.seeded_randomize(&entropy_source.get_secure_random_bytes());
8612
8613                 // `user_id` used to be a single u64 value. In order to remain backwards
8614                 // compatible with versions prior to 0.0.113, the u128 is serialized as two
8615                 // separate u64 values.
8616                 let user_id = user_id_low as u128 + ((user_id_high_opt.unwrap_or(0) as u128) << 64);
8617
8618                 let holder_max_accepted_htlcs = holder_max_accepted_htlcs.unwrap_or(DEFAULT_MAX_HTLCS);
8619
8620                 if let Some(skimmed_fees) = pending_outbound_skimmed_fees_opt {
8621                         let mut iter = skimmed_fees.into_iter();
8622                         for htlc in pending_outbound_htlcs.iter_mut() {
8623                                 htlc.skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8624                         }
8625                         // We expect all skimmed fees to be consumed above
8626                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8627                 }
8628                 if let Some(skimmed_fees) = holding_cell_skimmed_fees_opt {
8629                         let mut iter = skimmed_fees.into_iter();
8630                         for htlc in holding_cell_htlc_updates.iter_mut() {
8631                                 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut skimmed_fee_msat, .. } = htlc {
8632                                         *skimmed_fee_msat = iter.next().ok_or(DecodeError::InvalidValue)?;
8633                                 }
8634                         }
8635                         // We expect all skimmed fees to be consumed above
8636                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8637                 }
8638                 if let Some(blinding_pts) = pending_outbound_blinding_points_opt {
8639                         let mut iter = blinding_pts.into_iter();
8640                         for htlc in pending_outbound_htlcs.iter_mut() {
8641                                 htlc.blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8642                         }
8643                         // We expect all blinding points to be consumed above
8644                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8645                 }
8646                 if let Some(blinding_pts) = holding_cell_blinding_points_opt {
8647                         let mut iter = blinding_pts.into_iter();
8648                         for htlc in holding_cell_htlc_updates.iter_mut() {
8649                                 if let HTLCUpdateAwaitingACK::AddHTLC { ref mut blinding_point, .. } = htlc {
8650                                         *blinding_point = iter.next().ok_or(DecodeError::InvalidValue)?;
8651                                 }
8652                         }
8653                         // We expect all blinding points to be consumed above
8654                         if iter.next().is_some() { return Err(DecodeError::InvalidValue) }
8655                 }
8656
8657                 if let Some(malformed_htlcs) = malformed_htlcs {
8658                         for (malformed_htlc_id, failure_code, sha256_of_onion) in malformed_htlcs {
8659                                 let htlc_idx = holding_cell_htlc_updates.iter().position(|htlc| {
8660                                         if let HTLCUpdateAwaitingACK::FailHTLC { htlc_id, err_packet } = htlc {
8661                                                 let matches = *htlc_id == malformed_htlc_id;
8662                                                 if matches { debug_assert!(err_packet.data.is_empty()) }
8663                                                 matches
8664                                         } else { false }
8665                                 }).ok_or(DecodeError::InvalidValue)?;
8666                                 let malformed_htlc = HTLCUpdateAwaitingACK::FailMalformedHTLC {
8667                                         htlc_id: malformed_htlc_id, failure_code, sha256_of_onion
8668                                 };
8669                                 let _ = core::mem::replace(&mut holding_cell_htlc_updates[htlc_idx], malformed_htlc);
8670                         }
8671                 }
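                // (`FailMalformedHTLC` postdates the holding cell serialization format, so
                // such updates are written as `FailHTLC` entries with an empty error packet
                // plus the TLV 43 sidecar read above, and are converted back here; see the
                // `debug_assert!` on the empty packet.)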
8672
8673                 Ok(Channel {
8674                         context: ChannelContext {
8675                                 user_id,
8676
8677                                 config: config.unwrap(),
8678
8679                                 prev_config: None,
8680
8681                                 // Note that we don't care about serializing handshake limits as we only ever serialize
8682                                 // channel data after the handshake has completed.
8683                                 inbound_handshake_limits_override: None,
8684
8685                                 channel_id,
8686                                 temporary_channel_id,
8687                                 channel_state,
8688                                 announcement_sigs_state: announcement_sigs_state.unwrap(),
8689                                 secp_ctx,
8690                                 channel_value_satoshis,
8691
8692                                 latest_monitor_update_id,
8693
8694                                 holder_signer: ChannelSignerType::Ecdsa(holder_signer),
8695                                 shutdown_scriptpubkey,
8696                                 destination_script,
8697
8698                                 cur_holder_commitment_transaction_number,
8699                                 cur_counterparty_commitment_transaction_number,
8700                                 value_to_self_msat,
8701
8702                                 holder_max_accepted_htlcs,
8703                                 pending_inbound_htlcs,
8704                                 pending_outbound_htlcs,
8705                                 holding_cell_htlc_updates,
8706
8707                                 resend_order,
8708
8709                                 monitor_pending_channel_ready,
8710                                 monitor_pending_revoke_and_ack,
8711                                 monitor_pending_commitment_signed,
8712                                 monitor_pending_forwards,
8713                                 monitor_pending_failures,
8714                                 monitor_pending_finalized_fulfills: monitor_pending_finalized_fulfills.unwrap(),
8715
8716                                 signer_pending_commitment_update: false,
8717                                 signer_pending_funding: false,
8718
8719                                 pending_update_fee,
8720                                 holding_cell_update_fee,
8721                                 next_holder_htlc_id,
8722                                 next_counterparty_htlc_id,
8723                                 update_time_counter,
8724                                 feerate_per_kw,
8725
8726                                 #[cfg(debug_assertions)]
8727                                 holder_max_commitment_tx_output: Mutex::new((0, 0)),
8728                                 #[cfg(debug_assertions)]
8729                                 counterparty_max_commitment_tx_output: Mutex::new((0, 0)),
8730
8731                                 last_sent_closing_fee: None,
8732                                 pending_counterparty_closing_signed: None,
8733                                 expecting_peer_commitment_signed: false,
8734                                 closing_fee_limits: None,
8735                                 target_closing_feerate_sats_per_kw,
8736
8737                                 funding_tx_confirmed_in,
8738                                 funding_tx_confirmation_height,
8739                                 short_channel_id,
8740                                 channel_creation_height: channel_creation_height.unwrap(),
8741
8742                                 counterparty_dust_limit_satoshis,
8743                                 holder_dust_limit_satoshis,
8744                                 counterparty_max_htlc_value_in_flight_msat,
8745                                 holder_max_htlc_value_in_flight_msat: holder_max_htlc_value_in_flight_msat.unwrap(),
8746                                 counterparty_selected_channel_reserve_satoshis,
8747                                 holder_selected_channel_reserve_satoshis: holder_selected_channel_reserve_satoshis.unwrap(),
8748                                 counterparty_htlc_minimum_msat,
8749                                 holder_htlc_minimum_msat,
8750                                 counterparty_max_accepted_htlcs,
8751                                 minimum_depth,
8752
8753                                 counterparty_forwarding_info,
8754
8755                                 channel_transaction_parameters: channel_parameters,
8756                                 funding_transaction,
8757                                 is_batch_funding,
8758
8759                                 counterparty_cur_commitment_point,
8760                                 counterparty_prev_commitment_point,
8761                                 counterparty_node_id,
8762
8763                                 counterparty_shutdown_scriptpubkey,
8764
8765                                 commitment_secrets,
8766
8767                                 channel_update_status,
8768                                 closing_signed_in_flight: false,
8769
8770                                 announcement_sigs,
8771
8772                                 #[cfg(any(test, fuzzing))]
8773                                 next_local_commitment_tx_fee_info_cached: Mutex::new(None),
8774                                 #[cfg(any(test, fuzzing))]
8775                                 next_remote_commitment_tx_fee_info_cached: Mutex::new(None),
8776
8777                                 workaround_lnd_bug_4006: None,
8778                                 sent_message_awaiting_response: None,
8779
8780                                 latest_inbound_scid_alias,
8781                                 // Later in the ChannelManager deserialization phase we scan for channels and assign scid aliases if they're missing.
8782                                 outbound_scid_alias: outbound_scid_alias.unwrap_or(0),
8783
8784                                 channel_pending_event_emitted: channel_pending_event_emitted.unwrap_or(true),
8785                                 channel_ready_event_emitted: channel_ready_event_emitted.unwrap_or(true),
8786
8787                                 #[cfg(any(test, fuzzing))]
8788                                 historical_inbound_htlc_fulfills,
8789
8790                                 channel_type: channel_type.unwrap(),
8791                                 channel_keys_id,
8792
8793                                 local_initiated_shutdown,
8794
8795                                 blocked_monitor_updates: blocked_monitor_updates.unwrap(),
8796                         }
8797                 })
8798         }
8799 }
8800
8801 #[cfg(test)]
8802 mod tests {
8803         use std::cmp;
8804         use bitcoin::blockdata::constants::ChainHash;
8805         use bitcoin::blockdata::script::{ScriptBuf, Builder};
8806         use bitcoin::blockdata::transaction::{Transaction, TxOut};
8807         use bitcoin::blockdata::opcodes;
8808         use bitcoin::network::constants::Network;
8809         use crate::ln::onion_utils::INVALID_ONION_BLINDING;
8810         use crate::ln::{PaymentHash, PaymentPreimage};
8811         use crate::ln::channel_keys::{RevocationKey, RevocationBasepoint};
8812         use crate::ln::channelmanager::{self, HTLCSource, PaymentId};
8813         use crate::ln::channel::InitFeatures;
8814         use crate::ln::channel::{AwaitingChannelReadyFlags, Channel, ChannelState, InboundHTLCOutput, OutboundV1Channel, InboundV1Channel, OutboundHTLCOutput, InboundHTLCState, OutboundHTLCState, HTLCCandidate, HTLCInitiator, HTLCUpdateAwaitingACK, commit_tx_fee_msat};
8815         use crate::ln::channel::{MAX_FUNDING_SATOSHIS_NO_WUMBO, TOTAL_BITCOIN_SUPPLY_SATOSHIS, MIN_THEIR_CHAN_RESERVE_SATOSHIS};
8816         use crate::ln::features::{ChannelFeatures, ChannelTypeFeatures, NodeFeatures};
8817         use crate::ln::msgs;
8818         use crate::ln::msgs::{ChannelUpdate, DecodeError, UnsignedChannelUpdate, MAX_VALUE_MSAT};
8819         use crate::ln::script::ShutdownScript;
8820         use crate::ln::chan_utils::{self, htlc_success_tx_weight, htlc_timeout_tx_weight};
8821         use crate::chain::BestBlock;
8822         use crate::chain::chaininterface::{FeeEstimator, LowerBoundedFeeEstimator, ConfirmationTarget};
8823         use crate::sign::{ChannelSigner, InMemorySigner, EntropySource, SignerProvider};
8824         use crate::chain::transaction::OutPoint;
8825         use crate::routing::router::{Path, RouteHop};
8826         use crate::util::config::UserConfig;
8827         use crate::util::errors::APIError;
8828         use crate::util::ser::{ReadableArgs, Writeable};
8829         use crate::util::test_utils;
8830         use crate::util::test_utils::{OnGetShutdownScriptpubkey, TestKeysInterface};
8831         use bitcoin::secp256k1::{Secp256k1, ecdsa::Signature};
8832         use bitcoin::secp256k1::ffi::Signature as FFISignature;
8833         use bitcoin::secp256k1::{SecretKey,PublicKey};
8834         use bitcoin::hashes::sha256::Hash as Sha256;
8835         use bitcoin::hashes::Hash;
8836         use bitcoin::hashes::hex::FromHex;
8837         use bitcoin::hash_types::WPubkeyHash;
8838         use bitcoin::blockdata::locktime::absolute::LockTime;
8839         use bitcoin::address::{WitnessProgram, WitnessVersion};
8840         use crate::prelude::*;
8841
8842         #[test]
8843         fn test_channel_state_order() {
8844                 use crate::ln::channel::NegotiatingFundingFlags;
8845                 use crate::ln::channel::AwaitingChannelReadyFlags;
8846                 use crate::ln::channel::ChannelReadyFlags;
8847
8848                 assert!(ChannelState::NegotiatingFunding(NegotiatingFundingFlags::new()) < ChannelState::FundingNegotiated);
8849                 assert!(ChannelState::FundingNegotiated < ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()));
8850                 assert!(ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()) < ChannelState::ChannelReady(ChannelReadyFlags::new()));
8851                 assert!(ChannelState::ChannelReady(ChannelReadyFlags::new()) < ChannelState::ShutdownComplete);
8852         }
8853
8854         struct TestFeeEstimator {
8855                 fee_est: u32
8856         }
8857         impl FeeEstimator for TestFeeEstimator {
8858                 fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 {
8859                         self.fee_est
8860                 }
8861         }
8862
8863         #[test]
8864         fn test_max_funding_satoshis_no_wumbo() {
8865                 assert_eq!(TOTAL_BITCOIN_SUPPLY_SATOSHIS, 21_000_000 * 100_000_000);
8866                 assert!(MAX_FUNDING_SATOSHIS_NO_WUMBO <= TOTAL_BITCOIN_SUPPLY_SATOSHIS,
8867                         "MAX_FUNDING_SATOSHIS_NO_WUMBO is greater than all satoshis in existence");
8868         }
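
        // A small self-contained check (ours, not from upstream) of the `user_id`
        // round-trip arithmetic used by `Channel` ser/deser above: the u128 is split
        // into a low u64 (the legacy fixed field) and a high u64 (TLV type 25), then
        // recombined on read.
        #[test]
        fn user_id_u128_round_trip_sketch() {
                let user_id: u128 = (0x0123_4567_89ab_cdefu128 << 64) | 42;
                let user_id_low = user_id as u64;
                let user_id_high = (user_id >> 64) as u64;
                assert_eq!((user_id_low as u128) + ((user_id_high as u128) << 64), user_id);
        }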
8869
8870         struct Keys {
8871                 signer: InMemorySigner,
8872         }
8873
8874         impl EntropySource for Keys {
8875                 fn get_secure_random_bytes(&self) -> [u8; 32] { [0; 32] }
8876         }
8877
8878         impl SignerProvider for Keys {
8879                 type EcdsaSigner = InMemorySigner;
8880                 #[cfg(taproot)]
8881                 type TaprootSigner = InMemorySigner;
8882
8883                 fn generate_channel_keys_id(&self, _inbound: bool, _channel_value_satoshis: u64, _user_channel_id: u128) -> [u8; 32] {
8884                         self.signer.channel_keys_id()
8885                 }
8886
8887                 fn derive_channel_signer(&self, _channel_value_satoshis: u64, _channel_keys_id: [u8; 32]) -> Self::EcdsaSigner {
8888                         self.signer.clone()
8889                 }
8890
8891                 fn read_chan_signer(&self, _data: &[u8]) -> Result<Self::EcdsaSigner, DecodeError> { panic!(); }
8892
8893                 fn get_destination_script(&self, _channel_keys_id: [u8; 32]) -> Result<ScriptBuf, ()> {
8894                         let secp_ctx = Secp256k1::signing_only();
8895                         let channel_monitor_claim_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8896                         let channel_monitor_claim_key_hash = WPubkeyHash::hash(&PublicKey::from_secret_key(&secp_ctx, &channel_monitor_claim_key).serialize());
8897                         Ok(Builder::new().push_opcode(opcodes::all::OP_PUSHBYTES_0).push_slice(channel_monitor_claim_key_hash).into_script())
8898                 }
8899
8900                 fn get_shutdown_scriptpubkey(&self) -> Result<ShutdownScript, ()> {
8901                         let secp_ctx = Secp256k1::signing_only();
8902                         let channel_close_key = SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap();
8903                         Ok(ShutdownScript::new_p2wpkh_from_pubkey(PublicKey::from_secret_key(&secp_ctx, &channel_close_key)))
8904                 }
8905         }
8906
8907         #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
8908         fn public_from_secret_hex(secp_ctx: &Secp256k1<bitcoin::secp256k1::All>, hex: &str) -> PublicKey {
8909                 PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&<Vec<u8>>::from_hex(hex).unwrap()[..]).unwrap())
8910         }
8911
8912         #[test]
8913         fn upfront_shutdown_script_incompatibility() {
8914                 let features = channelmanager::provided_init_features(&UserConfig::default()).clear_shutdown_anysegwit();
8915                 let non_v0_segwit_shutdown_script = ShutdownScript::new_witness_program(
8916                         &WitnessProgram::new(WitnessVersion::V16, &[0, 40]).unwrap(),
8917                 ).unwrap();
8918
8919                 let seed = [42; 32];
8920                 let network = Network::Testnet;
8921                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8922                 keys_provider.expect(OnGetShutdownScriptpubkey {
8923                         returns: non_v0_segwit_shutdown_script.clone(),
8924                 });
8925
8926                 let secp_ctx = Secp256k1::new();
8927                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8928                 let config = UserConfig::default();
8929                 match OutboundV1Channel::<&TestKeysInterface>::new(&LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 253 }), &&keys_provider, &&keys_provider, node_id, &features, 10000000, 100000, 42, &config, 0, 42, None) {
8930                         Err(APIError::IncompatibleShutdownScript { script }) => {
8931                                 assert_eq!(script.into_inner(), non_v0_segwit_shutdown_script.into_inner());
8932                         },
8933                         Err(e) => panic!("Unexpected error: {:?}", e),
8934                         Ok(_) => panic!("Expected error"),
8935                 }
8936         }
8937
8938         // Check that, during channel creation, we use the same feerate in the open channel message
8939         // as we do in the Channel object creation itself.
8940         #[test]
8941         fn test_open_channel_msg_fee() {
8942                 let original_fee = 253;
8943                 let mut fee_est = TestFeeEstimator{fee_est: original_fee };
8944                 let bounded_fee_estimator = LowerBoundedFeeEstimator::new(&fee_est);
8945                 let secp_ctx = Secp256k1::new();
8946                 let seed = [42; 32];
8947                 let network = Network::Testnet;
8948                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8949
8950                 let node_a_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8951                 let config = UserConfig::default();
8952                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&bounded_fee_estimator, &&keys_provider, &&keys_provider, node_a_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8953
8954                 // Now change the fee so we can check that the fee in the open_channel message is the
8955                 // same as the old fee.
8956                 fee_est.fee_est = 500;
8957                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8958                 assert_eq!(open_channel_msg.common_fields.commitment_feerate_sat_per_1000_weight, original_fee);
8959         }
8960
8961         #[test]
8962         fn test_holder_vs_counterparty_dust_limit() {
8963                 // Test that when calculating the local and remote commitment transaction fees, the correct
8964                 // dust limits are used.
8965                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
8966                 let secp_ctx = Secp256k1::new();
8967                 let seed = [42; 32];
8968                 let network = Network::Testnet;
8969                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
8970                 let logger = test_utils::TestLogger::new();
8971                 let best_block = BestBlock::from_network(network);
8972
8973                 // Go through the flow of opening a channel between two nodes, making sure
8974                 // they have different dust limits.
8975
8976                 // Create Node A's channel pointing to Node B's pubkey
8977                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
8978                 let config = UserConfig::default();
8979                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
8980
8981                 // Create Node B's channel by receiving Node A's open_channel message
8982                 // Make sure A's dust limit is as we expect.
8983                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
8984                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
8985                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
8986
8987                 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
8988                 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
8989                 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
8990                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
8991                 node_a_chan.context.holder_dust_limit_satoshis = 1560;
8992
8993                 // Node A --> Node B: funding created
8994                 let output_script = node_a_chan.context.get_funding_redeemscript();
8995                 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
8996                         value: 10000000, script_pubkey: output_script.clone(),
8997                 }]};
8998                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
8999                 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9000                 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9001
9002                 // Node B --> Node A: funding signed
9003                 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9004                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9005
9006                 // Put some inbound and outbound HTLCs in A's channel.
9007                 let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's.
9008                 node_a_chan.context.pending_inbound_htlcs.push(InboundHTLCOutput {
9009                         htlc_id: 0,
9010                         amount_msat: htlc_amount_msat,
9011                         payment_hash: PaymentHash(Sha256::hash(&[42; 32]).to_byte_array()),
9012                         cltv_expiry: 300000000,
9013                         state: InboundHTLCState::Committed,
9014                 });
9015
9016                 node_a_chan.context.pending_outbound_htlcs.push(OutboundHTLCOutput {
9017                         htlc_id: 1,
9018                         amount_msat: htlc_amount_msat, // put an amount below A's dust amount but above B's.
9019                         payment_hash: PaymentHash(Sha256::hash(&[43; 32]).to_byte_array()),
9020                         cltv_expiry: 200000000,
9021                         state: OutboundHTLCState::Committed,
9022                         source: HTLCSource::OutboundRoute {
9023                                 path: Path { hops: Vec::new(), blinded_tail: None },
9024                                 session_priv: SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9025                                 first_hop_htlc_msat: 548,
9026                                 payment_id: PaymentId([42; 32]),
9027                         },
9028                         skimmed_fee_msat: None,
9029                         blinding_point: None,
9030                 });
9031
9032                 // Make sure when Node A calculates their local commitment transaction, none of the HTLCs pass
9033                 // the dust limit check.
9034                 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9035                 let local_commit_tx_fee = node_a_chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9036                 let local_commit_fee_0_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 0, node_a_chan.context.get_channel_type());
9037                 assert_eq!(local_commit_tx_fee, local_commit_fee_0_htlcs);
9038
9039                 // Finally, make sure that when Node A calculates the remote's commitment transaction fees, all
9040                 // of the HTLCs are seen to be above the dust limit.
9041                 node_a_chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9042                 let remote_commit_fee_3_htlcs = commit_tx_fee_msat(node_a_chan.context.feerate_per_kw, 3, node_a_chan.context.get_channel_type());
9043                 let htlc_candidate = HTLCCandidate::new(htlc_amount_msat, HTLCInitiator::LocalOffered);
9044                 let remote_commit_tx_fee = node_a_chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9045                 assert_eq!(remote_commit_tx_fee, remote_commit_fee_3_htlcs);
9046         }
9047
9048         #[test]
9049         fn test_timeout_vs_success_htlc_dust_limit() {
9050                 // Make sure that when `next_remote_commit_tx_fee_msat` and `next_local_commit_tx_fee_msat`
9051                 // calculate the real dust limits for HTLCs (i.e. the dust limit given by the counterparty
9052                 // *plus* the fees paid for the HTLC) they don't swap `HTLC_SUCCESS_TX_WEIGHT` for
9053                 // `HTLC_TIMEOUT_TX_WEIGHT`, and vice versa.
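                // Worked numbers, assuming the non-anchor weights (663 for HTLC-timeout,
                // 703 for HTLC-success): at 253 sat/kW an offered HTLC is non-dust above
                // dust_limit + 253 * 663 / 1000 = dust_limit + 167 sat, while a received
                // HTLC is non-dust above dust_limit + 253 * 703 / 1000 = dust_limit + 177 sat.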
9054                 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 253 });
9055                 let secp_ctx = Secp256k1::new();
9056                 let seed = [42; 32];
9057                 let network = Network::Testnet;
9058                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9059
9060                 let node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9061                 let config = UserConfig::default();
9062                 let mut chan = OutboundV1Channel::<&TestKeysInterface>::new(&fee_est, &&keys_provider, &&keys_provider, node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9063
9064                 let commitment_tx_fee_0_htlcs = commit_tx_fee_msat(chan.context.feerate_per_kw, 0, chan.context.get_channel_type());
9065                 let commitment_tx_fee_1_htlc = commit_tx_fee_msat(chan.context.feerate_per_kw, 1, chan.context.get_channel_type());
9066
9067                 // If HTLC_SUCCESS_TX_WEIGHT and HTLC_TIMEOUT_TX_WEIGHT were swapped: then this HTLC would be
9068                 // counted as dust when it shouldn't be.
9069                 let htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis + 1) * 1000;
9070                 let htlc_candidate = HTLCCandidate::new(htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9071                 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9072                 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9073
9074                 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9075                 let dust_htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.holder_dust_limit_satoshis - 1) * 1000;
9076                 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9077                 let commitment_tx_fee = chan.context.next_local_commit_tx_fee_msat(htlc_candidate, None);
9078                 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9079
9080                 chan.context.channel_transaction_parameters.is_outbound_from_holder = false;
9081
9082                 // If swapped: this HTLC would be counted as non-dust when it shouldn't be.
9083                 let dust_htlc_amt_above_timeout = ((253 * htlc_timeout_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis + 1) * 1000;
9084                 let htlc_candidate = HTLCCandidate::new(dust_htlc_amt_above_timeout, HTLCInitiator::LocalOffered);
9085                 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9086                 assert_eq!(commitment_tx_fee, commitment_tx_fee_0_htlcs);
9087
9088                 // If swapped: this HTLC would be counted as dust when it shouldn't be.
9089                 let htlc_amt_below_success = ((253 * htlc_success_tx_weight(chan.context.get_channel_type()) / 1000) + chan.context.counterparty_dust_limit_satoshis - 1) * 1000;
9090                 let htlc_candidate = HTLCCandidate::new(htlc_amt_below_success, HTLCInitiator::RemoteOffered);
9091                 let commitment_tx_fee = chan.context.next_remote_commit_tx_fee_msat(htlc_candidate, None);
9092                 assert_eq!(commitment_tx_fee, commitment_tx_fee_1_htlc);
9093         }
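
        // A hedged sketch of the two dust thresholds the test above distinguishes. On a given
        // party's commitment transaction, an offered HTLC must fund a future HTLC-timeout
        // transaction and a received HTLC an HTLC-success transaction; the HTLC is trimmed as
        // dust unless it covers the relevant dust limit plus that second-stage fee. The weights
        // (663/703, non-anchor, per BOLT 3) and the 354 sat dust limit below are illustrative
        // assumptions, not values read from the channel.
        #[test]
        fn second_stage_dust_threshold_sketch() {
                let feerate_per_kw: u64 = 253;
                let htlc_timeout_weight: u64 = 663; // assumed non-anchor HTLC-timeout tx weight
                let htlc_success_weight: u64 = 703; // assumed non-anchor HTLC-success tx weight
                let dust_limit_sat: u64 = 354; // assumed dust limit
                let timeout_threshold_sat = dust_limit_sat + feerate_per_kw * htlc_timeout_weight / 1000;
                let success_threshold_sat = dust_limit_sat + feerate_per_kw * htlc_success_weight / 1000;
                // Success transactions are heavier (their witness carries the preimage), so
                // swapping the two weights misclassifies any HTLC whose value sits between the
                // two thresholds, which is exactly the range the test above probes.
                assert!(success_threshold_sat > timeout_threshold_sat);
                assert_eq!(timeout_threshold_sat, 521);
                assert_eq!(success_threshold_sat, 531);
        }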
9094
9095         #[test]
9096         fn channel_reestablish_no_updates() {
9097                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9098                 let logger = test_utils::TestLogger::new();
9099                 let secp_ctx = Secp256k1::new();
9100                 let seed = [42; 32];
9101                 let network = Network::Testnet;
9102                 let best_block = BestBlock::from_network(network);
9103                 let chain_hash = ChainHash::using_genesis_block(network);
9104                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9105
9106                 // Go through the flow of opening a channel between two nodes.
9107
9108                 // Create Node A's channel pointing to Node B's pubkey
9109                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9110                 let config = UserConfig::default();
9111                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9112
9113                 // Create Node B's channel by receiving Node A's open_channel message
9114                 let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
9115                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9116                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9117
9118                 // Node B --> Node A: accept channel
9119                 let accept_channel_msg = node_b_chan.accept_inbound_channel();
9120                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9121
9122                 // Node A --> Node B: funding created
9123                 let output_script = node_a_chan.context.get_funding_redeemscript();
9124                 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9125                         value: 10000000, script_pubkey: output_script.clone(),
9126                 }]};
9127                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9128                 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9129                 let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9130
9131                 // Node B --> Node A: funding signed
9132                 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9133                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9134
9135                 // Now disconnect the two nodes and check that the commitment point in
9136                 // Node B's channel_reestablish message is sane.
9137                 assert!(node_b_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9138                 let msg = node_b_chan.get_channel_reestablish(&&logger);
9139                 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9140                 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9141                 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
9142
9143                 // Check that the commitment point in Node A's channel_reestablish message
9144                 // is sane.
9145                 assert!(node_a_chan.remove_uncommitted_htlcs_and_mark_paused(&&logger).is_ok());
9146                 let msg = node_a_chan.get_channel_reestablish(&&logger);
9147                 assert_eq!(msg.next_local_commitment_number, 1); // now called next_commitment_number
9148                 assert_eq!(msg.next_remote_commitment_number, 0); // now called next_revocation_number
9149                 assert_eq!(msg.your_last_per_commitment_secret, [0; 32]);
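
                // Reading aid (not from the original test): after `funding_signed`, each side
                // holds exactly one signed commitment, number 0, so the next commitment it
                // expects is number 1; and since no `revoke_and_ack` has been exchanged, the
                // next revocation number is still 0 and `your_last_per_commitment_secret`
                // remains the all-zero placeholder.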
9150         }
9151
9152         #[test]
9153         fn test_configured_holder_max_htlc_value_in_flight() {
9154                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9155                 let logger = test_utils::TestLogger::new();
9156                 let secp_ctx = Secp256k1::new();
9157                 let seed = [42; 32];
9158                 let network = Network::Testnet;
9159                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9160                 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9161                 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9162
9163                 let mut config_2_percent = UserConfig::default();
9164                 config_2_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 2;
9165                 let mut config_99_percent = UserConfig::default();
9166                 config_99_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 99;
9167                 let mut config_0_percent = UserConfig::default();
9168                 config_0_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 0;
9169                 let mut config_101_percent = UserConfig::default();
9170                 config_101_percent.channel_handshake_config.max_inbound_htlc_value_in_flight_percent_of_channel = 101;
9171
9172                 // Test that `OutboundV1Channel::new` creates a channel with the correct value for
9173                 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9174                 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9175                 let chan_1 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_2_percent), 10000000, 100000, 42, &config_2_percent, 0, 42, None).unwrap();
9176                 let chan_1_value_msat = chan_1.context.channel_value_satoshis * 1000;
9177                 assert_eq!(chan_1.context.holder_max_htlc_value_in_flight_msat, (chan_1_value_msat as f64 * 0.02) as u64);
9178
9179                 // Test with the upper bound - 1 of valid values (99%).
9180                 let chan_2 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_99_percent), 10000000, 100000, 42, &config_99_percent, 0, 42, None).unwrap();
9181                 let chan_2_value_msat = chan_2.context.channel_value_satoshis * 1000;
9182                 assert_eq!(chan_2.context.holder_max_htlc_value_in_flight_msat, (chan_2_value_msat as f64 * 0.99) as u64);
9183
9184                 let chan_1_open_channel_msg = chan_1.get_open_channel(ChainHash::using_genesis_block(network));
9185
9186                 // Test that `InboundV1Channel::new` creates a channel with the correct value for
9187                 // `holder_max_htlc_value_in_flight_msat`, when configured with a valid percentage value,
9188                 // which is set to the lower bound + 1 (2%) of the `channel_value`.
9189                 let chan_3 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_2_percent), &channelmanager::provided_init_features(&config_2_percent), &chan_1_open_channel_msg, 7, &config_2_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9190                 let chan_3_value_msat = chan_3.context.channel_value_satoshis * 1000;
9191                 assert_eq!(chan_3.context.holder_max_htlc_value_in_flight_msat, (chan_3_value_msat as f64 * 0.02) as u64);
9192
9193                 // Test with the upper bound - 1 of valid values (99%).
9194                 let chan_4 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_99_percent), &channelmanager::provided_init_features(&config_99_percent), &chan_1_open_channel_msg, 7, &config_99_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9195                 let chan_4_value_msat = chan_4.context.channel_value_satoshis * 1000;
9196                 assert_eq!(chan_4.context.holder_max_htlc_value_in_flight_msat, (chan_4_value_msat as f64 * 0.99) as u64);
9197
9198                 // Test that `OutboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9199                 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9200                 let chan_5 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_0_percent), 10000000, 100000, 42, &config_0_percent, 0, 42, None).unwrap();
9201                 let chan_5_value_msat = chan_5.context.channel_value_satoshis * 1000;
9202                 assert_eq!(chan_5.context.holder_max_htlc_value_in_flight_msat, (chan_5_value_msat as f64 * 0.01) as u64);
9203
9204                 // Test that `OutboundV1Channel::new` uses the upper bound of the configurable percentage values
9205                 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
9206                 // than 100.
9207                 let chan_6 = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&config_101_percent), 10000000, 100000, 42, &config_101_percent, 0, 42, None).unwrap();
9208                 let chan_6_value_msat = chan_6.context.channel_value_satoshis * 1000;
9209                 assert_eq!(chan_6.context.holder_max_htlc_value_in_flight_msat, chan_6_value_msat);
9210
9211                 // Test that `InboundV1Channel::new` uses the lower bound of the configurable percentage values (1%)
9212                 // if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a value less than 1.
9213                 let chan_7 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_0_percent), &channelmanager::provided_init_features(&config_0_percent), &chan_1_open_channel_msg, 7, &config_0_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9214                 let chan_7_value_msat = chan_7.context.channel_value_satoshis * 1000;
9215                 assert_eq!(chan_7.context.holder_max_htlc_value_in_flight_msat, (chan_7_value_msat as f64 * 0.01) as u64);
9216
9217                 // Test that `InboundV1Channel::new` uses the upper bound of the configurable percentage values
9218                 // (100%) if `max_inbound_htlc_value_in_flight_percent_of_channel` is set to a larger value
9219                 // than 100.
9220                 let chan_8 = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&config_101_percent), &channelmanager::provided_init_features(&config_101_percent), &chan_1_open_channel_msg, 7, &config_101_percent, 0, &&logger, /*is_0conf=*/false).unwrap();
9221                 let chan_8_value_msat = chan_8.context.channel_value_satoshis * 1000;
9222                 assert_eq!(chan_8.context.holder_max_htlc_value_in_flight_msat, chan_8_value_msat);
9223         }
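
        // A compact sketch of the clamping behavior exercised above, assuming the configured
        // percentage is clamped to [1, 100] before being applied to the channel value. The
        // helper below is hypothetical, not an LDK API.
        #[test]
        fn max_in_flight_percent_clamp_sketch() {
                fn holder_max_in_flight_msat(channel_value_sat: u64, configured_percent: u8) -> u64 {
                        let pct = configured_percent.clamp(1, 100) as u64;
                        channel_value_sat * 1000 * pct / 100
                }
                assert_eq!(holder_max_in_flight_msat(100_000, 2), 2_000_000);
                assert_eq!(holder_max_in_flight_msat(100_000, 0), 1_000_000); // clamped up to 1%
                assert_eq!(holder_max_in_flight_msat(100_000, 101), 100_000_000); // clamped down to 100%
        }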
9224
9225         #[test]
9226         fn test_configured_holder_selected_channel_reserve_satoshis() {
9227
9228                 // Test that `OutboundV1Channel::new` and `InboundV1Channel::new` create a channel with the correct
9229                 // channel reserves, when `their_channel_reserve_proportional_millionths` is configured.
9230                 test_self_and_counterparty_channel_reserve(10_000_000, 0.02, 0.02);
9231
9232                 // Test with valid but unreasonably high channel reserves:
9233                 // the requesting and accepting parties ask for 49%/49% and 60%/30% channel reserves.
9234                 test_self_and_counterparty_channel_reserve(10_000_000, 0.49, 0.49);
9235                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.30);
9236
9237                 // Test with a calculated channel reserve below the lower bound,
9238                 // i.e. `MIN_THEIR_CHAN_RESERVE_SATOSHIS`.
9239                 test_self_and_counterparty_channel_reserve(100_000, 0.00002, 0.30);
9240
9241                 // Test with invalid channel reserves, where the sum of both reserves is greater
9242                 // than or equal to the channel value.
9243                 test_self_and_counterparty_channel_reserve(10_000_000, 0.50, 0.50);
9244                 test_self_and_counterparty_channel_reserve(10_000_000, 0.60, 0.50);
9245         }
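
        // A hedged sketch of the reserve selection checked by the helper below: each side asks
        // its counterparty to hold `channel_value * their_channel_reserve_proportional_millionths
        // / 1_000_000` sats, floored at `MIN_THEIR_CHAN_RESERVE_SATOSHIS` (restated here as an
        // assumed 1000 sats). Negotiation must fail whenever the two reserves sum to the whole
        // channel value or more, since that would leave no spendable balance.
        #[test]
        fn channel_reserve_formula_sketch() {
                const ASSUMED_MIN_RESERVE_SAT: u64 = 1000; // assumed MIN_THEIR_CHAN_RESERVE_SATOSHIS
                fn selected_reserve_sat(channel_value_sat: u64, proportional_millionths: u64) -> u64 {
                        cmp::max(ASSUMED_MIN_RESERVE_SAT, channel_value_sat * proportional_millionths / 1_000_000)
                }
                // 2% of 10M sats comfortably exceeds the floor...
                assert_eq!(selected_reserve_sat(10_000_000, 20_000), 200_000);
                // ...while 0.002% of 100k sats does not, so the floor applies.
                assert_eq!(selected_reserve_sat(100_000, 20), ASSUMED_MIN_RESERVE_SAT);
                // Two 50% reserves consume the entire channel value, so negotiation must fail.
                assert!(2 * selected_reserve_sat(10_000_000, 500_000) >= 10_000_000);
        }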
9246
9247         fn test_self_and_counterparty_channel_reserve(channel_value_satoshis: u64, outbound_selected_channel_reserve_perc: f64, inbound_selected_channel_reserve_perc: f64) {
9248                 let fee_est = LowerBoundedFeeEstimator::new(&TestFeeEstimator { fee_est: 15_000 });
9249                 let logger = test_utils::TestLogger::new();
9250                 let secp_ctx = Secp256k1::new();
9251                 let seed = [42; 32];
9252                 let network = Network::Testnet;
9253                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9254                 let outbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9255                 let inbound_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9256
9257
9258                 let mut outbound_node_config = UserConfig::default();
9259                 outbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (outbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
9260                 let chan = OutboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, outbound_node_id, &channelmanager::provided_init_features(&outbound_node_config), channel_value_satoshis, 100_000, 42, &outbound_node_config, 0, 42, None).unwrap();
9261
9262                 let expected_outbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * outbound_selected_channel_reserve_perc) as u64);
9263                 assert_eq!(chan.context.holder_selected_channel_reserve_satoshis, expected_outbound_selected_chan_reserve);
9264
9265                 let chan_open_channel_msg = chan.get_open_channel(ChainHash::using_genesis_block(network));
9266                 let mut inbound_node_config = UserConfig::default();
9267                 inbound_node_config.channel_handshake_config.their_channel_reserve_proportional_millionths = (inbound_selected_channel_reserve_perc * 1_000_000.0) as u32;
9268
9269                 if outbound_selected_channel_reserve_perc + inbound_selected_channel_reserve_perc < 1.0 {
9270                         let chan_inbound_node = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false).unwrap();
9271
9272                         let expected_inbound_selected_chan_reserve = cmp::max(MIN_THEIR_CHAN_RESERVE_SATOSHIS, (chan.context.channel_value_satoshis as f64 * inbound_selected_channel_reserve_perc) as u64);
9273
9274                         assert_eq!(chan_inbound_node.context.holder_selected_channel_reserve_satoshis, expected_inbound_selected_chan_reserve);
9275                         assert_eq!(chan_inbound_node.context.counterparty_selected_channel_reserve_satoshis.unwrap(), expected_outbound_selected_chan_reserve);
9276                 } else {
9277                         // Channel negotiation should fail: the combined reserves leave no spendable balance.
9278                         let result = InboundV1Channel::<&TestKeysInterface>::new(&&fee_est, &&keys_provider, &&keys_provider, inbound_node_id, &channelmanager::provided_channel_type_features(&inbound_node_config), &channelmanager::provided_init_features(&outbound_node_config), &chan_open_channel_msg, 7, &inbound_node_config, 0, &&logger, /*is_0conf=*/false);
9279                         assert!(result.is_err());
9280                 }
9281         }
9282
9283         #[test]
9284         fn channel_update() {
9285                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9286                 let logger = test_utils::TestLogger::new();
9287                 let secp_ctx = Secp256k1::new();
9288                 let seed = [42; 32];
9289                 let network = Network::Testnet;
9290                 let best_block = BestBlock::from_network(network);
9291                 let chain_hash = ChainHash::using_genesis_block(network);
9292                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9293
9294                 // Create Node A's channel pointing to Node B's pubkey
9295                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9296                 let config = UserConfig::default();
9297                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
9298
9299                 // Create Node B's channel by receiving Node A's open_channel message
9300                 // Make sure A's dust limit is as we expect.
9301                 let open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
9302                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
9303                 let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false).unwrap();
9304
9305                 // Node B --> Node A: accept channel, explicitly setting B's dust limit.
9306                 let mut accept_channel_msg = node_b_chan.accept_inbound_channel();
9307                 accept_channel_msg.common_fields.dust_limit_satoshis = 546;
9308                 node_a_chan.accept_channel(&accept_channel_msg, &config.channel_handshake_limits, &channelmanager::provided_init_features(&config)).unwrap();
9309                 node_a_chan.context.holder_dust_limit_satoshis = 1560;
9310
9311                 // Node A --> Node B: funding created
9312                 let output_script = node_a_chan.context.get_funding_redeemscript();
9313                 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9314                         value: 10000000, script_pubkey: output_script.clone(),
9315                 }]};
9316                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9317                 let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap();
9318                 let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap();
9319
9320                 // Node B --> Node A: funding signed
9321                 let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger);
9322                 let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); };
9323
9324                 // Make sure that receiving a channel update will update the Channel as expected.
9325                 let update = ChannelUpdate {
9326                         contents: UnsignedChannelUpdate {
9327                                 chain_hash,
9328                                 short_channel_id: 0,
9329                                 timestamp: 0,
9330                                 flags: 0,
9331                                 cltv_expiry_delta: 100,
9332                                 htlc_minimum_msat: 5,
9333                                 htlc_maximum_msat: MAX_VALUE_MSAT,
9334                                 fee_base_msat: 110,
9335                                 fee_proportional_millionths: 11,
9336                                 excess_data: Vec::new(),
9337                         },
9338                         signature: Signature::from(unsafe { FFISignature::new() })
9339                 };
9340                 assert!(node_a_chan.channel_update(&update).unwrap());
9341
9342                 // The counterparty can send an update with a higher minimum HTLC, but that shouldn't
9343                 // change our official htlc_minimum_msat.
9344                 assert_eq!(node_a_chan.context.holder_htlc_minimum_msat, 1);
9345                 match node_a_chan.context.counterparty_forwarding_info() {
9346                         Some(info) => {
9347                                 assert_eq!(info.cltv_expiry_delta, 100);
9348                                 assert_eq!(info.fee_base_msat, 110);
9349                                 assert_eq!(info.fee_proportional_millionths, 11);
9350                         },
9351                         None => panic!("expected counterparty forwarding info to be Some")
9352                 }
9353
9354                 assert!(!node_a_chan.channel_update(&update).unwrap());
9355         }
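
        // The pair of `channel_update` assertions above rely on the return value meaning "did
        // this update change our stored counterparty forwarding info?". A minimal sketch of that
        // contract, with a hypothetical `Info` struct standing in for the real forwarding-info
        // type:
        #[test]
        fn repeated_update_reports_no_change_sketch() {
                #[derive(Clone, PartialEq)]
                struct Info { cltv_expiry_delta: u16, fee_base_msat: u32, fee_proportional_millionths: u32 }
                let mut stored: Option<Info> = None;
                let mut apply = |new: Info| -> bool {
                        let changed = stored.as_ref() != Some(&new);
                        stored = Some(new);
                        changed
                };
                let info = Info { cltv_expiry_delta: 100, fee_base_msat: 110, fee_proportional_millionths: 11 };
                assert!(apply(info.clone())); // the first application changes stored state
                assert!(!apply(info)); // an identical re-application does not
        }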
9356
9357         #[test]
9358         fn blinding_point_skimmed_fee_malformed_ser() {
9359                 // Ensure that channel blinding points, skimmed fees, and malformed HTLCs are (de)serialized
9360                 // properly.
9361                 let logger = test_utils::TestLogger::new();
9362                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
9363                 let secp_ctx = Secp256k1::new();
9364                 let seed = [42; 32];
9365                 let network = Network::Testnet;
9366                 let best_block = BestBlock::from_network(network);
9367                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
9368
9369                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9370                 let config = UserConfig::default();
9371                 let features = channelmanager::provided_init_features(&config);
9372                 let mut outbound_chan = OutboundV1Channel::<&TestKeysInterface>::new(
9373                         &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &features, 10000000, 100000, 42, &config, 0, 42, None
9374                 ).unwrap();
9375                 let inbound_chan = InboundV1Channel::<&TestKeysInterface>::new(
9376                         &feeest, &&keys_provider, &&keys_provider, node_b_node_id, &channelmanager::provided_channel_type_features(&config),
9377                         &features, &outbound_chan.get_open_channel(ChainHash::using_genesis_block(network)), 7, &config, 0, &&logger, false
9378                 ).unwrap();
9379                 outbound_chan.accept_channel(&inbound_chan.get_accept_channel_message(), &config.channel_handshake_limits, &features).unwrap();
9380                 let tx = Transaction { version: 1, lock_time: LockTime::ZERO, input: Vec::new(), output: vec![TxOut {
9381                         value: 10000000, script_pubkey: outbound_chan.context.get_funding_redeemscript(),
9382                 }]};
9383                 let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
9384                 let funding_created = outbound_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap().unwrap();
9385                 let mut chan = match inbound_chan.funding_created(&funding_created, best_block, &&keys_provider, &&logger) {
9386                         Ok((chan, _, _)) => chan,
9387                         Err((_, e)) => panic!("{}", e),
9388                 };
9389
9390                 let dummy_htlc_source = HTLCSource::OutboundRoute {
9391                         path: Path {
9392                                 hops: vec![RouteHop {
9393                                         pubkey: test_utils::pubkey(2), channel_features: ChannelFeatures::empty(),
9394                                         node_features: NodeFeatures::empty(), short_channel_id: 0, fee_msat: 0,
9395                                         cltv_expiry_delta: 0, maybe_announced_channel: false,
9396                                 }],
9397                                 blinded_tail: None
9398                         },
9399                         session_priv: test_utils::privkey(42),
9400                         first_hop_htlc_msat: 0,
9401                         payment_id: PaymentId([42; 32]),
9402                 };
9403                 let dummy_outbound_output = OutboundHTLCOutput {
9404                         htlc_id: 0,
9405                         amount_msat: 0,
9406                         payment_hash: PaymentHash([43; 32]),
9407                         cltv_expiry: 0,
9408                         state: OutboundHTLCState::Committed,
9409                         source: dummy_htlc_source.clone(),
9410                         skimmed_fee_msat: None,
9411                         blinding_point: None,
9412                 };
9413                 let mut pending_outbound_htlcs = vec![dummy_outbound_output.clone(); 10];
9414                 for (idx, htlc) in pending_outbound_htlcs.iter_mut().enumerate() {
9415                         if idx % 2 == 0 {
9416                                 htlc.blinding_point = Some(test_utils::pubkey(42 + idx as u8));
9417                         }
9418                         if idx % 3 == 0 {
9419                                 htlc.skimmed_fee_msat = Some(1);
9420                         }
9421                 }
9422                 chan.context.pending_outbound_htlcs = pending_outbound_htlcs.clone();
9423
9424                 let dummy_holding_cell_add_htlc = HTLCUpdateAwaitingACK::AddHTLC {
9425                         amount_msat: 0,
9426                         cltv_expiry: 0,
9427                         payment_hash: PaymentHash([43; 32]),
9428                         source: dummy_htlc_source.clone(),
9429                         onion_routing_packet: msgs::OnionPacket {
9430                                 version: 0,
9431                                 public_key: Ok(test_utils::pubkey(1)),
9432                                 hop_data: [0; 20*65],
9433                                 hmac: [0; 32]
9434                         },
9435                         skimmed_fee_msat: None,
9436                         blinding_point: None,
9437                 };
9438                 let dummy_holding_cell_claim_htlc = HTLCUpdateAwaitingACK::ClaimHTLC {
9439                         payment_preimage: PaymentPreimage([42; 32]),
9440                         htlc_id: 0,
9441                 };
9442                 let dummy_holding_cell_failed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailHTLC {
9443                         htlc_id, err_packet: msgs::OnionErrorPacket { data: vec![42] }
9444                 };
9445                 let dummy_holding_cell_malformed_htlc = |htlc_id| HTLCUpdateAwaitingACK::FailMalformedHTLC {
9446                         htlc_id, failure_code: INVALID_ONION_BLINDING, sha256_of_onion: [0; 32],
9447                 };
9448                 let mut holding_cell_htlc_updates = Vec::with_capacity(12);
9449                 for i in 0..12 {
9450                         if i % 5 == 0 {
9451                                 holding_cell_htlc_updates.push(dummy_holding_cell_add_htlc.clone());
9452                         } else if i % 5 == 1 {
9453                                 holding_cell_htlc_updates.push(dummy_holding_cell_claim_htlc.clone());
9454                         } else if i % 5 == 2 {
9455                                 let mut dummy_add = dummy_holding_cell_add_htlc.clone();
9456                                 if let HTLCUpdateAwaitingACK::AddHTLC {
9457                                         ref mut blinding_point, ref mut skimmed_fee_msat, ..
9458                                 } = &mut dummy_add {
9459                                         *blinding_point = Some(test_utils::pubkey(42 + i));
9460                                         *skimmed_fee_msat = Some(42);
9461                                 } else { panic!() }
9462                                 holding_cell_htlc_updates.push(dummy_add);
9463                         } else if i % 5 == 3 {
9464                                 holding_cell_htlc_updates.push(dummy_holding_cell_malformed_htlc(i as u64));
9465                         } else {
9466                                 holding_cell_htlc_updates.push(dummy_holding_cell_failed_htlc(i as u64));
9467                         }
9468                 }
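
                // Reading aid (not from the original test): the `i % 5` schedule above puts at
                // least one of every `HTLCUpdateAwaitingACK` variant in the holding cell,
                // including adds both with and without blinding points and skimmed fees, so the
                // encode/decode round-trip below exercises each serialization path.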
9469                 chan.context.holding_cell_htlc_updates = holding_cell_htlc_updates.clone();
9470
9471                 // Encode and decode the channel and ensure that the HTLCs within are the same.
9472                 let encoded_chan = chan.encode();
9473                 let mut s = crate::io::Cursor::new(&encoded_chan);
9474                 let mut reader = crate::util::ser::FixedLengthReader::new(&mut s, encoded_chan.len() as u64);
9475                 let features = channelmanager::provided_channel_type_features(&config);
9476                 let decoded_chan = Channel::read(&mut reader, (&&keys_provider, &&keys_provider, 0, &features)).unwrap();
9477                 assert_eq!(decoded_chan.context.pending_outbound_htlcs, pending_outbound_htlcs);
9478                 assert_eq!(decoded_chan.context.holding_cell_htlc_updates, holding_cell_htlc_updates);
9479         }
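
        // A self-contained sketch of the round-trip property the test above checks, using a toy
        // flag-prefixed encoding of optional amounts in place of LDK's `Writeable`/`Readable`
        // machinery (both helpers below are hypothetical).
        #[test]
        fn optional_field_roundtrip_sketch() {
                fn write_opt(v: Option<u64>, out: &mut Vec<u8>) {
                        match v {
                                Some(x) => { out.push(1); out.extend_from_slice(&x.to_be_bytes()); },
                                None => out.push(0),
                        }
                }
                fn read_opt(inp: &mut &[u8]) -> Option<u64> {
                        let (flag, rest) = inp.split_first().unwrap();
                        *inp = rest;
                        if *flag == 0 { return None; }
                        let mut buf = [0u8; 8];
                        buf.copy_from_slice(&inp[..8]);
                        *inp = &inp[8..];
                        Some(u64::from_be_bytes(buf))
                }
                let vals = [None, Some(1u64), Some(42)];
                let mut bytes = Vec::new();
                for v in vals.iter() { write_opt(*v, &mut bytes); }
                let mut cursor = &bytes[..];
                for v in vals.iter() { assert_eq!(read_opt(&mut cursor), *v); }
        }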
9480
9481         #[cfg(all(feature = "_test_vectors", not(feature = "grind_signatures")))]
9482         #[test]
9483         fn outbound_commitment_test() {
9484                 use bitcoin::sighash;
9485                 use bitcoin::consensus::encode::serialize;
9486                 use bitcoin::sighash::EcdsaSighashType;
9487                 use bitcoin::hashes::hex::FromHex;
9488                 use bitcoin::hash_types::Txid;
9489                 use bitcoin::secp256k1::Message;
9490                 use crate::sign::{ChannelDerivationParameters, HTLCDescriptor, ecdsa::EcdsaChannelSigner};
9491                 use crate::ln::PaymentPreimage;
9492                 use crate::ln::channel::{HTLCOutputInCommitment, TxCreationKeys};
9493                 use crate::ln::channel_keys::{DelayedPaymentBasepoint, HtlcBasepoint};
9494                 use crate::ln::chan_utils::{ChannelPublicKeys, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters};
9495                 use crate::util::logger::Logger;
9496                 use crate::sync::Arc;
9497                 use core::str::FromStr;
9498                 use hex::DisplayHex;
9499
9500                 // Test vectors from BOLT 3 Appendices C and F (anchors):
9501                 let feeest = TestFeeEstimator{fee_est: 15000};
9502                 let logger : Arc<dyn Logger> = Arc::new(test_utils::TestLogger::new());
9503                 let secp_ctx = Secp256k1::new();
9504
9505                 let mut signer = InMemorySigner::new(
9506                         &secp_ctx,
9507                         SecretKey::from_slice(&<Vec<u8>>::from_hex("30ff4956bbdd3222d44cc5e8a1261dab1e07957bdac5ae88fe3261ef321f3749").unwrap()[..]).unwrap(),
9508                         SecretKey::from_slice(&<Vec<u8>>::from_hex("0fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap()[..]).unwrap(),
9509                         SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9510                         SecretKey::from_slice(&<Vec<u8>>::from_hex("3333333333333333333333333333333333333333333333333333333333333333").unwrap()[..]).unwrap(),
9511                         SecretKey::from_slice(&<Vec<u8>>::from_hex("1111111111111111111111111111111111111111111111111111111111111111").unwrap()[..]).unwrap(),
9512
9513                         // These aren't set in the test vectors:
9514                         [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff],
9515                         10_000_000,
9516                         [0; 32],
9517                         [0; 32],
9518                 );
9519
9520                 assert_eq!(signer.pubkeys().funding_pubkey.serialize()[..],
9521                                 <Vec<u8>>::from_hex("023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb").unwrap()[..]);
9522                 let keys_provider = Keys { signer: signer.clone() };
9523
9524                 let counterparty_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
9525                 let mut config = UserConfig::default();
9526                 config.channel_handshake_config.announced_channel = false;
9527                 let mut chan = OutboundV1Channel::<&Keys>::new(&LowerBoundedFeeEstimator::new(&feeest), &&keys_provider, &&keys_provider, counterparty_node_id, &channelmanager::provided_init_features(&config), 10_000_000, 0, 42, &config, 0, 42, None).unwrap(); // Nothing uses their network key in this test
9528                 chan.context.holder_dust_limit_satoshis = 546;
9529                 chan.context.counterparty_selected_channel_reserve_satoshis = Some(0); // Filled in by accept_channel
9530
9531                 let funding_info = OutPoint{ txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
9532
9533                 let counterparty_pubkeys = ChannelPublicKeys {
9534                         funding_pubkey: public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13"),
9535                         revocation_basepoint: RevocationBasepoint::from(PublicKey::from_slice(&<Vec<u8>>::from_hex("02466d7fcae563e5cb09a0d1870bb580344804617879a14949cf22285f1bae3f27").unwrap()[..]).unwrap()),
9536                         payment_point: public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"),
9537                         delayed_payment_basepoint: DelayedPaymentBasepoint::from(public_from_secret_hex(&secp_ctx, "1552dfba4f6cf29a62a0af13c8d6981d36d0ef8d61ba10fb0fe90da7634d7e13")),
9538                         htlc_basepoint: HtlcBasepoint::from(public_from_secret_hex(&secp_ctx, "4444444444444444444444444444444444444444444444444444444444444444"))
9539                 };
9540                 chan.context.channel_transaction_parameters.counterparty_parameters = Some(
9541                         CounterpartyChannelTransactionParameters {
9542                                 pubkeys: counterparty_pubkeys.clone(),
9543                                 selected_contest_delay: 144
9544                         });
9545                 chan.context.channel_transaction_parameters.funding_outpoint = Some(funding_info);
9546                 signer.provide_channel_parameters(&chan.context.channel_transaction_parameters);
9547
9548                 assert_eq!(counterparty_pubkeys.payment_point.serialize()[..],
9549                            <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9550
9551                 assert_eq!(counterparty_pubkeys.funding_pubkey.serialize()[..],
9552                            <Vec<u8>>::from_hex("030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c1").unwrap()[..]);
9553
9554                 assert_eq!(counterparty_pubkeys.htlc_basepoint.to_public_key().serialize()[..],
9555                            <Vec<u8>>::from_hex("032c0b7cf95324a07d05398b240174dc0c2be444d96b159aa6c7f7b1e668680991").unwrap()[..]);
9556
9557                 // We can't just use build_holder_transaction_keys here as the per_commitment_secret is not
9558                 // derived from a commitment_seed, so instead we copy it here and call
9559                 // build_commitment_transaction.
9560                 let delayed_payment_base = &chan.context.holder_signer.as_ref().pubkeys().delayed_payment_basepoint;
9561                 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
9562                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
9563                 let htlc_basepoint = &chan.context.holder_signer.as_ref().pubkeys().htlc_basepoint;
9564                 let keys = TxCreationKeys::derive_new(&secp_ctx, &per_commitment_point, delayed_payment_base, htlc_basepoint, &counterparty_pubkeys.revocation_basepoint, &counterparty_pubkeys.htlc_basepoint);
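
                // Reading aid (BOLT 3 key derivation, not from the original test): each
                // per-commitment key in `TxCreationKeys` is derived from its basepoint as
                //   pubkey = basepoint + SHA256(per_commitment_point || basepoint) * G,
                // except the revocation key, which additionally blinds the countersignatory's
                // revocation basepoint with the broadcaster's per-commitment point, so it can
                // only be signed for once the matching per-commitment secret is revealed.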
9565
9566                 macro_rules! test_commitment {
9567                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9568                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::only_static_remote_key();
9569                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::only_static_remote_key(), $($remain)*);
9570                         };
9571                 }
9572
9573                 macro_rules! test_commitment_with_anchors {
9574                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $($remain:tt)* ) => {
9575                                 chan.context.channel_transaction_parameters.channel_type_features = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
9576                                 test_commitment_common!($counterparty_sig_hex, $sig_hex, $tx_hex, &ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), $($remain)*);
9577                         };
9578                 }
9579
9580                 macro_rules! test_commitment_common {
9581                         ( $counterparty_sig_hex: expr, $sig_hex: expr, $tx_hex: expr, $opt_anchors: expr, {
9582                                 $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), *
9583                         } ) => { {
9584                                 let (commitment_tx, htlcs): (_, Vec<HTLCOutputInCommitment>) = {
9585                                         let mut commitment_stats = chan.context.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger);
9586
9587                                         let htlcs = commitment_stats.htlcs_included.drain(..)
9588                                                 .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None })
9589                                                 .collect();
9590                                         (commitment_stats.tx, htlcs)
9591                                 };
9592                                 let trusted_tx = commitment_tx.trust();
9593                                 let unsigned_tx = trusted_tx.built_transaction();
9594                                 let redeemscript = chan.context.get_funding_redeemscript();
9595                                 let counterparty_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_sig_hex).unwrap()[..]).unwrap();
9596                                 let sighash = unsigned_tx.get_sighash_all(&redeemscript, chan.context.channel_value_satoshis);
9597                                 log_trace!(logger, "unsigned_tx = {}", serialize(&unsigned_tx.transaction).as_hex());
9598                                 assert!(secp_ctx.verify_ecdsa(&sighash, &counterparty_signature, chan.context.counterparty_funding_pubkey()).is_ok(), "verify counterparty commitment sig");
9599
9600                                 let mut per_htlc: Vec<(HTLCOutputInCommitment, Option<Signature>)> = Vec::new();
9601                                 per_htlc.clear(); // Don't warn about excess mut for no-HTLC calls
9602                                 let mut counterparty_htlc_sigs = Vec::new();
9603                                 counterparty_htlc_sigs.clear(); // Don't warn about excess mut for no-HTLC calls
9604                                 $({
9605                                         let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9606                                         per_htlc.push((htlcs[$htlc_idx].clone(), Some(remote_signature)));
9607                                         counterparty_htlc_sigs.push(remote_signature);
9608                                 })*
9609                                 assert_eq!(htlcs.len(), per_htlc.len());
9610
9611                                 let holder_commitment_tx = HolderCommitmentTransaction::new(
9612                                         commitment_tx.clone(),
9613                                         counterparty_signature,
9614                                         counterparty_htlc_sigs,
9615                                         &chan.context.holder_signer.as_ref().pubkeys().funding_pubkey,
9616                                         chan.context.counterparty_funding_pubkey()
9617                                 );
9618                                 let holder_sig = signer.sign_holder_commitment(&holder_commitment_tx, &secp_ctx).unwrap();
9619                                 assert_eq!(Signature::from_der(&<Vec<u8>>::from_hex($sig_hex).unwrap()[..]).unwrap(), holder_sig, "holder_sig");
9620
9621                                 let funding_redeemscript = chan.context.get_funding_redeemscript();
9622                                 let tx = holder_commitment_tx.add_holder_sig(&funding_redeemscript, holder_sig);
9623                                 assert_eq!(serialize(&tx)[..], <Vec<u8>>::from_hex($tx_hex).unwrap()[..], "tx");
9624
9625                                 // ((htlc, counterparty_sig), (index, holder_sig))
9626                                 let mut htlc_counterparty_sig_iter = holder_commitment_tx.counterparty_htlc_sigs.iter();
9627
9628                                 $({
9629                                         log_trace!(logger, "verifying htlc {}", $htlc_idx);
9630                                         let remote_signature = Signature::from_der(&<Vec<u8>>::from_hex($counterparty_htlc_sig_hex).unwrap()[..]).unwrap();
9631
9632                                         let ref htlc = htlcs[$htlc_idx];
9633                                         let mut htlc_tx = chan_utils::build_htlc_transaction(&unsigned_tx.txid, chan.context.feerate_per_kw,
9634                                                 chan.context.get_counterparty_selected_contest_delay().unwrap(),
9635                                                 &htlc, $opt_anchors, &keys.broadcaster_delayed_payment_key, &keys.revocation_key);
9636                                         let htlc_redeemscript = chan_utils::get_htlc_redeemscript(&htlc, $opt_anchors, &keys);
9637                                         let htlc_sighashtype = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { EcdsaSighashType::SinglePlusAnyoneCanPay } else { EcdsaSighashType::All };
9638                                         let htlc_sighash = Message::from_slice(&sighash::SighashCache::new(&htlc_tx).segwit_signature_hash(0, &htlc_redeemscript, htlc.amount_msat / 1000, htlc_sighashtype).unwrap()[..]).unwrap();
9639                                         assert!(secp_ctx.verify_ecdsa(&htlc_sighash, &remote_signature, &keys.countersignatory_htlc_key.to_public_key()).is_ok(), "verify counterparty htlc sig");
9640
9641                                         let mut preimage: Option<PaymentPreimage> = None;
9642                                         if !htlc.offered {
9643                                                 for i in 0..5 {
9644                                                         let out = PaymentHash(Sha256::hash(&[i; 32]).to_byte_array());
9645                                                         if out == htlc.payment_hash {
9646                                                                 preimage = Some(PaymentPreimage([i; 32]));
9647                                                         }
9648                                                 }
9649
9650                                                 assert!(preimage.is_some());
9651                                         }
9652
9653                                         let htlc_counterparty_sig = htlc_counterparty_sig_iter.next().unwrap();
9654                                         let htlc_holder_sig = signer.sign_holder_htlc_transaction(&htlc_tx, 0, &HTLCDescriptor {
9655                                                 channel_derivation_parameters: ChannelDerivationParameters {
9656                                                         value_satoshis: chan.context.channel_value_satoshis,
9657                                                         keys_id: chan.context.channel_keys_id,
9658                                                         transaction_parameters: chan.context.channel_transaction_parameters.clone(),
9659                                                 },
9660                                                 commitment_txid: trusted_tx.txid(),
9661                                                 per_commitment_number: trusted_tx.commitment_number(),
9662                                                 per_commitment_point: trusted_tx.per_commitment_point(),
9663                                                 feerate_per_kw: trusted_tx.feerate_per_kw(),
9664                                                 htlc: htlc.clone(),
9665                                                 preimage: preimage.clone(),
9666                                                 counterparty_sig: *htlc_counterparty_sig,
9667                                         }, &secp_ctx).unwrap();
9668                                         let num_anchors = if $opt_anchors.supports_anchors_zero_fee_htlc_tx() { 2 } else { 0 };
9669                                         assert_eq!(htlc.transaction_output_index, Some($htlc_idx + num_anchors), "output index");
9670
9671                                         let signature = Signature::from_der(&<Vec<u8>>::from_hex($htlc_sig_hex).unwrap()[..]).unwrap();
9672                                         assert_eq!(signature, htlc_holder_sig, "htlc sig");
9673                                         let trusted_tx = holder_commitment_tx.trust();
9674                                         htlc_tx.input[0].witness = trusted_tx.build_htlc_input_witness($htlc_idx, htlc_counterparty_sig, &htlc_holder_sig, &preimage);
9675                                         log_trace!(logger, "htlc_tx = {}", serialize(&htlc_tx).as_hex());
9676                                         assert_eq!(serialize(&htlc_tx)[..], <Vec<u8>>::from_hex($htlc_tx_hex).unwrap()[..], "htlc tx");
9677                                 })*
9678                                 assert!(htlc_counterparty_sig_iter.next().is_none());
9679                         } }
9680                 }
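
                // Reading aid (BOLT 3, not part of the original vectors): the
                // `0xffffffffffff - 42` passed to `build_commitment_transaction` inside the
                // macros above is commitment number 42 counting down from 2^48 - 1. On chain the
                // number is further obscured by XOR with the low 48 bits of
                // SHA256(open_node_payment_basepoint || accept_node_payment_basepoint), split
                // across the commitment transaction's locktime and input sequence fields.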
9681
9682                 // anchors: simple commitment tx with no HTLCs and single anchor
9683                 test_commitment_with_anchors!("30440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a8658",
9684                                                  "3044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778",
9685                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f10529800000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022007cf6b405e9c9b4f527b0ecad9d8bb661fabb8b12abf7d1c0b3ad1855db3ed490220616d5c1eeadccc63bd775a131149455d62d95a42c2a1b01cc7821fc42dce7778014730440220655bf909fb6fa81d086f1336ac72c97906dce29d1b166e305c99152d810e26e1022051f577faa46412c46707aaac46b65d50053550a66334e00a44af2706f27a865801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9686
9687                 // simple commitment tx with no HTLCs
9688                 chan.context.value_to_self_msat = 7000000000;
9689
9690                 test_commitment!("3045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b0",
9691                                                  "30440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae055647142",
9692                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48454a56a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004730440220616210b2cc4d3afb601013c373bbd8aac54febd9f15400379a8cb65ce7deca60022034236c010991beb7ff770510561ae8dc885b8d38d1947248c38f2ae05564714201483045022100c3127b33dcc741dd6b05b1e63cbd1a9a7d816f37af9b6756fa2376b056f032370220408b96279808fe57eb7e463710804cdf4f108388bc5cf722d8c848d2c7f9f3b001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9693
9694                 // anchors: simple commitment tx with no HTLCs
9695                 test_commitment_with_anchors!("3045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f3",
9696                                                  "30450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f7",
9697                                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a508b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008266ac6db5ea71aac3c95d97b0e172ff596844851a3216eb88382a8dddfd33d2022050e240974cfd5d708708b4365574517c18e7ae535ef732a3484d43d0d82be9f701483045022100f89034eba16b2be0e5581f750a0a6309192b75cce0f202f0ee2b4ec0cc394850022076c65dc507fe42276152b7a3d90e961e678adbe966e916ecfe85e64d430e75f301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
9698
9699                 chan.context.pending_inbound_htlcs.push({
9700                         let mut out = InboundHTLCOutput{
9701                                 htlc_id: 0,
9702                                 amount_msat: 1000000,
9703                                 cltv_expiry: 500,
9704                                 payment_hash: PaymentHash([0; 32]),
9705                                 state: InboundHTLCState::Committed,
9706                         };
9707                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap()).to_byte_array();
9708                         out
9709                 });
9710                 chan.context.pending_inbound_htlcs.push({
9711                         let mut out = InboundHTLCOutput{
9712                                 htlc_id: 1,
9713                                 amount_msat: 2000000,
9714                                 cltv_expiry: 501,
9715                                 payment_hash: PaymentHash([0; 32]),
9716                                 state: InboundHTLCState::Committed,
9717                         };
9718                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
9719                         out
9720                 });
9721                 chan.context.pending_outbound_htlcs.push({
9722                         let mut out = OutboundHTLCOutput{
9723                                 htlc_id: 2,
9724                                 amount_msat: 2000000,
9725                                 cltv_expiry: 502,
9726                                 payment_hash: PaymentHash([0; 32]),
9727                                 state: OutboundHTLCState::Committed,
9728                                 source: HTLCSource::dummy(),
9729                                 skimmed_fee_msat: None,
9730                                 blinding_point: None,
9731                         };
9732                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0202020202020202020202020202020202020202020202020202020202020202").unwrap()).to_byte_array();
9733                         out
9734                 });
9735                 chan.context.pending_outbound_htlcs.push({
9736                         let mut out = OutboundHTLCOutput{
9737                                 htlc_id: 3,
9738                                 amount_msat: 3000000,
9739                                 cltv_expiry: 503,
9740                                 payment_hash: PaymentHash([0; 32]),
9741                                 state: OutboundHTLCState::Committed,
9742                                 source: HTLCSource::dummy(),
9743                                 skimmed_fee_msat: None,
9744                                 blinding_point: None,
9745                         };
9746                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0303030303030303030303030303030303030303030303030303030303030303").unwrap()).to_byte_array();
9747                         out
9748                 });
9749                 chan.context.pending_inbound_htlcs.push({
9750                         let mut out = InboundHTLCOutput{
9751                                 htlc_id: 4,
9752                                 amount_msat: 4000000,
9753                                 cltv_expiry: 504,
9754                                 payment_hash: PaymentHash([0; 32]),
9755                                 state: InboundHTLCState::Committed,
9756                         };
9757                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0404040404040404040404040404040404040404040404040404040404040404").unwrap()).to_byte_array();
9758                         out
9759                 });
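                 // Per Appendix C: HTLCs 0, 1 and 4 are received (inbound) on this, the holder's,
                 // commitment and would be claimed via HTLC-success with the preimage; HTLCs 2 and 3
                 // are offered (outbound) and reclaimed via HTLC-timeout. The preimages are the
                 // repeated bytes 0x00..0x04, hashed above to produce each payment_hash.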
9760
9761                 // commitment tx with all five HTLCs untrimmed (minimum feerate)
9762                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9763                 chan.context.feerate_per_kw = 0;
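                 // At a feerate of 0 the second-stage txs are free, so each HTLC's full value
                 // (1000 sat or more) clears the 546 sat dust limit and all five stay untrimmed.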
9764
9765                 test_commitment!("3044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e5",
9766                                  "304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea",
9767                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e0a06a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402206fc2d1f10ea59951eefac0b4b7c396a3c3d87b71ff0b019796ef4535beaf36f902201765b0181e514d04f4c8ad75659d7037be26cdb3f8bb6f78fe61decef484c3ea01473044022009b048187705a8cbc9ad73adbe5af148c3d012e1f067961486c822c7af08158c022006d66f3704cfab3eb2dc49dae24e4aa22a6910fc9b424007583204e3621af2e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9768
9769                                   { 0,
9770                                   "3045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b",
9771                                   "30440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce",
9772                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b00000000000000000001e8030000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d9e29616b8f3959f1d3d7f7ce893ffedcdc407717d0de8e37d808c91d3a7c50d022078c3033f6d00095c8720a4bc943c1b45727818c082e4e3ddbc6d3116435b624b014730440220636de5682ef0c5b61f124ec74e8aa2461a69777521d6998295dcea36bc3338110220165285594b23c50b28b82df200234566628a27bcd17f7f14404bd865354eb3ce012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9773
9774                                   { 1,
9775                                   "30440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f896004",
9776                                   "3045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f",
9777                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b01000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220649fe8b20e67e46cbb0d09b4acea87dbec001b39b08dee7bdd0b1f03922a8640022037c462dff79df501cecfdb12ea7f4de91f99230bb544726f6e04527b1f89600401483045022100803159dee7935dba4a1d36a61055ce8fd62caa528573cc221ae288515405a252022029c59e7cffce374fe860100a4a63787e105c3cf5156d40b12dd53ff55ac8cf3f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9778
9779                                   { 2,
9780                                   "30440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d43352",
9781                                   "3045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa",
9782                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b02000000000000000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220770fc321e97a19f38985f2e7732dd9fe08d16a2efa4bcbc0429400a447faf49102204d40b417f3113e1b0944ae0986f517564ab4acd3d190503faf97a6e420d4335201483045022100a437cc2ce77400ecde441b3398fea3c3ad8bdad8132be818227fe3c5b8345989022069d45e7fa0ae551ec37240845e2c561ceb2567eacf3076a6a43a502d05865faa012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9783
9784                                   { 3,
9785                                   "304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c1363",
9786                                   "304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac7487",
9787                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b03000000000000000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207bcbf4f60a9829b05d2dbab84ed593e0291836be715dc7db6b72a64caf646af802201e489a5a84f7c5cc130398b841d138d031a5137ac8f4c49c770a4959dc3c13630147304402203121d9b9c055f354304b016a36662ee99e1110d9501cb271b087ddb6f382c2c80220549882f3f3b78d9c492de47543cb9a697cecc493174726146536c5954dac748701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9788
9789                                   { 4,
9790                                   "3044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b87",
9791                                   "3045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95",
9792                                   "02000000000101ab84ff284f162cfbfef241f853b47d4368d171f9e2a1445160cd591c4c7d882b04000000000000000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022076dca5cb81ba7e466e349b7128cdba216d4d01659e29b96025b9524aaf0d1899022060de85697b88b21c749702b7d2cfa7dfeaa1f472c8f1d7d9c23f2bf968464b8701483045022100d9080f103cc92bac15ec42464a95f070c7fb6925014e673ee2ea1374d36a7f7502200c65294d22eb20d48564954d5afe04a385551919d8b2ddb4ae2459daaeee1d95012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9793                 } );
9794
9795                 // commitment tx with seven outputs untrimmed (maximum feerate)
9796                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9797                 chan.context.feerate_per_kw = 647;
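                 // Boundary arithmetic: the smallest output is the received 1000 sat HTLC, whose
                 // HTLC-success fee is 647 * 703 / 1000 = 454 sat; 1000 - 454 = 546 >= 546, so it
                 // is kept. One more sat/kw would trim it (see the next case).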
9798
9799                 test_commitment!("3045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee",
9800                                  "30450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb7",
9801                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8007e80300000000000022002052bfef0479d7b293c27e0f1eb294bea154c63a3294ef092c19af51409bce0e2ad007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484e09c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009ec15c687898bb4da8b3a833e5ab8bfc51ec6e9202aaa8e66611edfd4a85ed1102203d7183e45078b9735c93450bc3415d3e5a8c576141a711ec6ddcb4a893926bb701483045022100a135f9e8a5ed25f7277446c67956b00ce6f610ead2bdec2c2f686155b7814772022059f1f6e1a8b336a68efcc1af3fe4d422d4827332b5b067501b099c47b7b5b5ee01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9802
9803                                   { 0,
9804                                   "30450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f",
9805                                   "30440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b",
9806                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221008437627f9ad84ac67052e2a414a4367b8556fd1f94d8b02590f89f50525cd33502205b9c21ff6e7fc864f2352746ad8ba59182510819acb644e25b8a12fc37bbf24f014730440220344b0deb055230d01703e6c7acd45853c4af2328b49b5d8af4f88a060733406602202ea64f2a43d5751edfe75503cbc35a62e3141b5ed032fa03360faf4ca66f670b012000000000000000000000000000000000000000000000000000000000000000008a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a914b8bcb07f6344b42ab04250c86a6e8b75d3fdbbc688527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f401b175ac686800000000" },
9807
9808                                   { 1,
9809                                   "304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c",
9810                                   "30450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f",
9811                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe0100000000000000000124060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205a67f92bf6845cf2892b48d874ac1daf88a36495cf8a06f93d83180d930a6f75022031da1621d95c3f335cc06a3056cf960199dae600b7cf89088f65fc53cdbef28c014830450221009e5e3822b0185c6799a95288c597b671d6cc69ab80f43740f00c6c3d0752bdda02206da947a74bd98f3175324dc56fdba86cc783703a120a6f0297537e60632f4c7f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9812
9813                                   { 2,
9814                                   "30440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c8",
9815                                   "3045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673",
9816                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe020000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220437e21766054a3eef7f65690c5bcfa9920babbc5af92b819f772f6ea96df6c7402207173622024bd97328cfb26c6665e25c2f5d67c319443ccdc60c903217005d8c801483045022100fcfc47e36b712624677626cef3dc1d67f6583bd46926a6398fe6b00b0c9a37760220525788257b187fc775c6370d04eadf34d06f3650a63f8df851cee0ecb47a1673012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9817
9818                                   { 3,
9819                                   "304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c64",
9820                                   "3045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee",
9821                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe030000000000000000010c0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207436e10737e4df499fc051686d3e11a5bb2310e4d1f1e691d287cef66514791202207cb58e71a6b7a42dd001b7e3ae672ea4f71ea3e1cd412b742e9124abb0739c6401483045022100e78211b8409afb7255ffe37337da87f38646f1faebbdd61bc1920d69e3ead67a02201a626305adfcd16bfb7e9340928d9b6305464eab4aa4c4a3af6646e9b9f69dee01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9822
9823                                   { 4,
9824                                   "30450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca",
9825                                   "3044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9",
9826                                   "020000000001012cfb3e4788c206881d38f2996b6cb2109b5935acb527d14bdaa7b908afa9b2fe04000000000000000001da0d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004830450221009acd6a827a76bfee50806178dfe0495cd4e1d9c58279c194c7b01520fe68cb8d022024d439047c368883e570997a7d40f0b430cb5a742f507965e7d3063ae3feccca01473044022048762cf546bbfe474f1536365ea7c416e3c0389d60558bc9412cb148fb6ab68202207215d7083b75c96ff9d2b08c59c34e287b66820f530b486a9aa4cdd9c347d5b9012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9827                 } );
9828
9829                 // commitment tx with six outputs untrimmed (minimum feerate)
9830                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9831                 chan.context.feerate_per_kw = 648;
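                 // 648 * 703 / 1000 = 455 sat; 1000 - 455 = 545 < 546, so the received 1000 sat
                 // HTLC is trimmed and the commitment drops to six outputs.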
9832
9833                 test_commitment!("304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e5",
9834                                  "3045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e9",
9835                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4844e9d6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b15f72908ba3382a34ca5b32519240a22300cc6015b6f9418635fb41f3d01d8802207adb331b9ed1575383dca0f2355e86c173802feecf8298fbea53b9d4610583e90147304402203948f900a5506b8de36a4d8502f94f21dd84fd9c2314ab427d52feaa7a0a19f2022059b6a37a4adaa2c5419dc8aea63c6e2a2ec4c4bde46207f6dc1fcd22152fc6e501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9836
9837                                   { 0,
9838                                   "3045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e0",
9839                                   "304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec6",
9840                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10000000000000000000123060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100a031202f3be94678f0e998622ee95ebb6ada8da1e9a5110228b5e04a747351e4022010ca6a21e18314ed53cfaae3b1f51998552a61a468e596368829a50ce40110e00148304502210097e1873b57267730154595187a34949d3744f52933070c74757005e61ce2112e02204ecfba2aa42d4f14bdf8bad4206bb97217b702e6c433e0e1b0ce6587e6d46ec601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9841
9842                                   { 1,
9843                                   "304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d384124",
9844                                   "3044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae",
9845                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf10100000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202361012a634aee7835c5ecdd6413dcffa8f404b7e77364c792cff984e4ee71e90220715c5e90baa08daa45a7439b1ee4fa4843ed77b19c058240b69406606d38412401473044022019de73b00f1d818fb388e83b2c8c31f6bce35ac624e215bc12f88f9dc33edf48022006ff814bb9f700ee6abc3294e146fac3efd4f13f0005236b41c0a946ee00c9ae012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9846
9847                                   { 2,
9848                                   "304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc625532989",
9849                                   "3045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e2260796",
9850                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf1020000000000000000010b0a0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207e8e82cd71ed4febeb593732c260456836e97d81896153ecd2b3cf320ca6861702202dd4a30f68f98ced7cc56a36369ac1fdd978248c5ff4ed204fc00cc62553298901483045022100bd0be6100c4fd8f102ec220e1b053e4c4e2ecca25615490150007b40d314dc3902201a1e0ea266965b43164d9e6576f58fa6726d42883dd1c3996d2925c2e226079601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9851
9852                                   { 3,
9853                                   "3044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be",
9854                                   "3045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6",
9855                                   "020000000001010f44041fdfba175987cf4e6135ba2a154e3b7fb96483dc0ed5efc0678e5b6bf103000000000000000001d90d0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022024cd52e4198c8ae0e414a86d86b5a65ea7450f2eb4e783096736d93395eca5ce022078f0094745b45be4d4b2b04dd5978c9e66ba49109e5704403e84aaf5f387d6be01483045022100bbfb9d0a946d420807c86e985d636cceb16e71c3694ed186316251a00cbd807202207773223f9a337e145f64673825be9b30d07ef1542c82188b264bedcf7cda78c6012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9856                 } );
9857
9858                 // anchors: commitment tx with six outputs untrimmed (minimum dust limit)
9859                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9860                 chan.context.feerate_per_kw = 645;
9861                 chan.context.holder_dust_limit_satoshis = 1001;
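                 // Under the zero-fee-HTLC anchors channel type used for the anchors vectors, the
                 // raw amount is compared to the dust limit: 1000 < 1001 trims the smallest HTLC
                 // even at this modest feerate.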
9862
9863                 test_commitment_with_anchors!("3044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc312",
9864                                  "3045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d0051",
9865                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80084a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d0070000000000002200203e68115ae0b15b8de75b6c6bc9af5ac9f01391544e0870dae443a1e8fe7837ead007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5eb80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994abc996a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d57697c707b6f6d053febf24b98e8989f186eea42e37e9e91663ec2c70bb8f70022079b0715a472118f262f43016a674f59c015d9cafccec885968e76d9d9c5d005101473044022025d97466c8049e955a5afce28e322f4b34d2561118e52332fb400f9b908cc0a402205dc6fba3a0d67ee142c428c535580cd1f2ff42e2f89b47e0c8a01847caffc31201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9866
9867                                   { 0,
9868                                   "3045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc282",
9869                                   "3045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef09",
9870                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320002000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e04d160a326432659fe9fb127304c1d348dfeaba840081bdc57d8efd902a48d8022008a824e7cf5492b97e4d9e03c06a09f822775a44f6b5b2533a2088904abfc28283483045022100b7c49846466b13b190ff739bbe3005c105482fc55539e55b1c561f76b6982b6c02200e5c35808619cf543c8405cff9fedd25f333a4a2f6f6d5e8af8150090c40ef0901008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6851b27568f6010000" },
9871
9872                                   { 1,
9873                                   "3045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e",
9874                                   "3045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac",
9875                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320003000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100fbdc3c367ce3bf30796025cc590ee1f2ce0e72ae1ac19f5986d6d0a4fc76211f02207e45ae9267e8e820d188569604f71d1abd11bd385d58853dd7dc034cdb3e9a6e83483045022100d29330f24db213b262068706099b39c15fa7e070c3fcdf8836c09723fc4d365602203ce57d01e9f28601e461a0b5c4a50119b270bde8b70148d133a6849c70b115ac012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
9876
9877                                   { 2,
9878                                   "3044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c1",
9879                                   "304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e",
9880                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320004000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022066c5ef625cee3ddd2bc7b6bfb354b5834cf1cc6d52dd972fb41b7b225437ae4a022066cb85647df65c6b87a54e416dcdcca778a776c36a9643d2b5dc793c9b29f4c18347304402202d4ce515cd9000ec37575972d70b8d24f73909fb7012e8ebd8c2066ef6fe187902202830b53e64ea565fecd0f398100691da6bb2a5cf9bb0d1926f1d71d05828a11e01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
9881
9882                                   { 3,
9883                                   "3044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc615",
9884                                   "3045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226",
9885                                   "02000000000101104f394af4c4fad78337f95e3e9f802f4c0d86ab231853af09b285348561320005000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022022c7e11595c53ee89a57ca76baf0aed730da035952d6ab3fe6459f5eff3b337a022075e10cc5f5fd724a35ce4087a5d03cd616698626c69814032132b50bb97dc61583483045022100b20cd63e0587d1711beaebda4730775c4ac8b8b2ec78fe18a0c44c3f168c25230220079abb7fc4924e2fca5950842e5b9e416735585026914570078c4ef62f286226012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
9886                 } );
9887
9888                 // commitment tx with six outputs untrimmed (maximum feerate)
9889                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9890                 chan.context.feerate_per_kw = 2069;
9891                 chan.context.holder_dust_limit_satoshis = 546;
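                 // Highest feerate that keeps six outputs: the received 2000 sat HTLC pays
                 // 2069 * 703 / 1000 = 1454 sat for its success tx; 2000 - 1454 = 546 >= 546.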
9892
9893                 test_commitment!("304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc",
9894                                  "3045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e33",
9895                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8006d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2db80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48477956a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100ad9a9bbbb75d506ca3b716b336ee3cf975dd7834fcf129d7dd188146eb58a8b4022061a759ee417339f7fe2ea1e8deb83abb6a74db31a09b7648a932a639cda23e330148304502210090b96a2498ce0c0f2fadbec2aab278fed54c1a7838df793ec4d2c78d96ec096202204fdd439c50f90d483baa7b68feeef4bd33bc277695405447bcd0bfb2ca34d7bc01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9896
9897                                   { 0,
9898                                   "3045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f699",
9899                                   "3045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d",
9900                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0000000000000000000175020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f33513ee38abf1c582876f921f8fddc06acff48e04515532a32d3938de938ffd02203aa308a2c1863b7d6fdf53159a1465bf2e115c13152546cc5d74483ceaa7f69901483045022100a637902a5d4c9ba9e7c472a225337d5aac9e2e3f6744f76e237132e7619ba0400220035c60d784a031c0d9f6df66b7eab8726a5c25397399ee4aa960842059eb3f9d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9901
9902                                   { 1,
9903                                   "3045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df",
9904                                   "3045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61",
9905                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d0100000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ce07682cf4b90093c22dc2d9ab2a77ad6803526b655ef857221cc96af5c9e0bf02200f501cee22e7a268af40b555d15a8237c9f36ad67ef1841daf9f6a0267b1e6df01483045022100e57e46234f8782d3ff7aa593b4f7446fb5316c842e693dc63ee324fd49f6a1c302204a2f7b44c48bd26e1554422afae13153eb94b29d3687b733d18930615fb2db61012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
9906
9907                                   { 2,
9908                                   "3045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e0",
9909                                   "3044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c18",
9910                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d020000000000000000015d060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e3e35492e55f82ec0bc2f317ffd7a486d1f7024330fe9743c3559fc39f32ef0c02203d1d4db651fc388a91d5ad8ecdd8e83673063bc8eefe27cfd8c189090e3a23e001473044022068613fb1b98eb3aec7f44c5b115b12343c2f066c4277c82b5f873dfe68f37f50022028109b4650f3f528ca4bfe9a467aff2e3e43893b61b5159157119d5d95cf1c1801008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9911
9912                                   { 3,
9913                                   "304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df",
9914                                   "3045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33",
9915                                   "02000000000101adbe717a63fb658add30ada1e6e12ed257637581898abe475c11d7bbcd65bd4d03000000000000000001f2090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402207475aeb0212ef9bf5130b60937817ad88c9a87976988ef1f323f026148cc4a850220739fea17ad3257dcad72e509c73eebe86bee30b178467b9fdab213d631b109df01483045022100d315522e09e7d53d2a659a79cb67fef56d6c4bddf3f46df6772d0d20a7beb7c8022070bcc17e288607b6a72be0bd83368bb6d53488db266c1cdb4d72214e4f02ac33012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9916                 } );
9917
9918                 // commitment tx with five outputs untrimmed (minimum feerate)
9919                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9920                 chan.context.feerate_per_kw = 2070;
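                 // 2070 * 703 / 1000 = 1455 sat; 2000 - 1455 = 545 < 546, so the received
                 // 2000 sat HTLC is trimmed, leaving five outputs.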
9921
9922                 test_commitment!("304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c",
9923                                  "3044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c1",
9924                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484da966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022001014419b5ba00e083ac4e0a85f19afc848aacac2d483b4b525d15e2ae5adbfe022015ebddad6ee1e72b47cb09f3e78459da5be01ccccd95dceca0e056a00cc773c10147304402204ca1ba260dee913d318271d86e10ca0f5883026fb5653155cff600fb40895223022037b145204b7054a40e08bb1fefbd826f827b40838d3e501423bcc57924bcb50c01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9925
9926                                   { 0,
9927                                   "304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b",
9928                                   "30440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e5",
9929                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff0000000000000000000174020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402205f6b6d12d8d2529fb24f4445630566cf4abbd0f9330ab6c2bdb94222d6a2a0c502202f556258ae6f05b193749e4c541dfcc13b525a5422f6291f073f15617ba8579b014730440220150b11069454da70caf2492ded9e0065c9a57f25ac2a4c52657b1d15b6c6ed85022068a38833b603c8892717206383611bad210f1cbb4b1f87ea29c6c65b9e1cb3e501008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9930
9931                                   { 1,
9932                                   "3045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546",
9933                                   "30450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c6",
9934                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff010000000000000000015c060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f960dfb1c9aee7ce1437efa65b523e399383e8149790e05d8fed27ff6e42fe0002202fe8613e062ffe0b0c518cc4101fba1c6de70f64a5bcc7ae663f2efae43b8546014830450221009a6ed18e6873bc3644332a6ee21c152a5b102821865350df7a8c74451a51f9f2022050d801fb4895d7d7fbf452824c0168347f5c0cbe821cf6a97a63af5b8b2563c601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9935
9936                                   { 2,
9937                                   "3045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504",
9938                                   "30440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502",
9939                                   "02000000000101403ad7602b43293497a3a2235a12ecefda4f3a1f1d06e49b1786d945685de1ff02000000000000000001f1090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100ae5fc7717ae684bc1fcf9020854e5dbe9842c9e7472879ac06ff95ac2bb10e4e022057728ada4c00083a3e65493fb5d50a232165948a1a0f530ef63185c2c8c56504014730440220408ad3009827a8fccf774cb285587686bfb2ed041f89a89453c311ce9c8ee0f902203c7392d9f8306d3a46522a66bd2723a7eb2628cb2d9b34d4c104f1766bf37502012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9940                 } );
9941
9942                 // commitment tx with five outputs untrimmed (maximum feerate)
9943                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9944                 chan.context.feerate_per_kw = 2194;
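                 // The offered 2000 sat HTLC survives a higher feerate because HTLC-timeout is
                 // lighter (663 WU): 2194 * 663 / 1000 = 1454 sat; 2000 - 1454 = 546 >= 546.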
9945
9946                 test_commitment!("304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb3",
9947                                  "3044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d398",
9948                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020403d394747cae42e98ff01734ad5c08f82ba123d3d9a620abda88989651e2ab5b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e48440966a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022072c2e2b1c899b2242656a537dde2892fa3801be0d6df0a87836c550137acde8302201654aa1974d37a829083c3ba15088689f30b56d6a4f6cb14c7bad0ee3116d3980147304402204bb3d6e279d71d9da414c82de42f1f954267c762b2e2eb8b76bc3be4ea07d4b0022014febc009c5edc8c3fc5d94015de163200f780046f1c293bfed8568f08b70fb301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9949
9950                                   { 0,
9951                                   "3045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de8450",
9952                                   "304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e0154301",
9953                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100939726680351a7856c1bc386d4a1f422c7d29bd7b56afc139570f508474e6c40022023175a799ccf44c017fbaadb924c40b2a12115a5b7d0dfd3228df803a2de84500148304502210099c98c2edeeee6ec0fb5f3bea8b79bb016a2717afa9b5072370f34382de281d302206f5e2980a995e045cf90a547f0752a7ee99d48547bc135258fe7bc07e015430101008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a914b43e1b38138a41b37f7cd9a1d274bc63e3a9b5d188ac6868f6010000" },
9954
9955                                   { 1,
9956                                   "3044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a0",
9957                                   "3045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b77",
9958                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd010000000000000000010a060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022021bb883bf324553d085ba2e821cad80c28ef8b303dbead8f98e548783c02d1600220638f9ef2a9bba25869afc923f4b5dc38be3bb459f9efa5d869392d5f7779a4a001483045022100fd85bd7697b89c08ec12acc8ba89b23090637d83abd26ca37e01ae93e67c367302202b551fe69386116c47f984aab9c8dfd25d864dcde5d3389cfbef2447a85c4b7701008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9959
9960                                   { 2,
9961                                   "3045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b",
9962                                   "30450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3",
9963                                   "02000000000101153cd825fdb3aa624bfe513e8031d5d08c5e582fb3d1d1fe8faf27d3eed410cd020000000000000000019a090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c9e6f0454aa598b905a35e641a70cc9f67b5f38cc4b00843a041238c4a9f1c4a0220260a2822a62da97e44583e837245995ca2e36781769c52f19e498efbdcca262b014830450221008a9f2ea24cd455c2b64c1472a5fa83865b0a5f49a62b661801e884cf2849af8302204d44180e50bf6adfcf1c1e581d75af91aba4e28681ce4a5ee5f3cbf65eca10f3012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9964                 } );
9965
9966                 // commitment tx with four outputs untrimmed (minimum feerate)
9967                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9968                 chan.context.feerate_per_kw = 2195;
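                 // 2195 * 663 / 1000 = 1455 sat; 2000 - 1455 = 545 < 546, so the offered
                 // 2000 sat HTLC is trimmed as well, leaving four outputs.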
9969
9970                 test_commitment!("304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce403",
9971                                  "3044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d1767",
9972                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484b8976a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400473044022044d592025b610c0d678f65032e87035cdfe89d1598c522cc32524ae8172417c30220749fef9d5b2ae8cdd91ece442ba8809bc891efedae2291e578475f97715d17670147304402201a8c1b1f9671cd9e46c7323a104d7047cc48d3ee80d40d4512e0c72b8dc65666022066d7f9a2ce18c9eb22d2739ffcce05721c767f9b607622a31b6ea5793ddce40301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9973
9974                                   { 0,
9975                                   "3045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e",
9976                                   "3045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d76",
9977                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900000000000000000000109060000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100e57b845066a06ee7c2cbfc29eabffe52daa9bf6f6de760066d04df9f9b250e0002202ffb197f0e6e0a77a75a9aff27014bd3de83b7f748d7efef986abe655e1dd50e01483045022100ecc8c6529d0b2316d046f0f0757c1e1c25a636db168ec4f3aa1b9278df685dc0022067ae6b65e936f1337091f7b18a15935b608c5f2cdddb2f892ed0babfdd376d7601008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
9978
9979                                   { 1,
9980                                   "3045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a",
9981                                   "3044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82",
9982                                   "020000000001018130a10f09b13677ba2885a8bca32860f3a952e5912b829a473639b5a2c07b900100000000000000000199090000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100d193b7ecccad8057571620a0b1ffa6c48e9483311723b59cf536043b20bc51550220546d4bd37b3b101ecda14f6c907af46ec391abce1cd9c7ce22b1a62b534f2f2a01473044022014d66f11f9cacf923807eba49542076c5fe5cccf252fb08fe98c78ef3ca6ab5402201b290dbe043cc512d9d78de074a5a129b8759bc6a6c546b190d120b690bd6e82012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
9983                 } );
9984
9985                 // anchors: commitment tx with four outputs untrimmed (minimum dust limit)
9986                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
9987                 chan.context.feerate_per_kw = 2185;
9988                 chan.context.holder_dust_limit_satoshis = 2001;
9989                 let cached_channel_type = chan.context.channel_type.clone();
9990                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
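                 // With zero-fee HTLC txs, a 2001 sat dust limit trims both 2000 sat HTLCs
                 // outright (2000 < 2001). The pre-anchors channel type is stashed in
                 // cached_channel_type so the non-anchors cases below can restore it.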
9991
9992                 test_commitment_with_anchors!("3044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c0",
9993                                  "3045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd5",
9994                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80064a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994b80b000000000000220020f96d0334feb64a4f40eb272031d07afcb038db56aa57446d60308c9f8ccadef9a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ac5916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100cd8479cfe1edb1e5a1d487391e0451a469c7171e51e680183f19eb4321f20e9b02204eab7d5a6384b1b08e03baa6e4d9748dfd2b5ab2bae7e39604a0d0055bbffdd501473044022040f63a16148cf35c8d3d41827f5ae7f7c3746885bb64d4d1b895892a83812b3e02202fcf95c2bf02c466163b3fa3ced6a24926fbb4035095a96842ef516e86ba54c001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
9995
9996                                   { 0,
9997                                   "304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb",
9998                                   "30440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f",
9999                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc02000000000100000001b80b0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206870514a72ad6e723ff7f1e0370d7a33c1cd2a0b9272674143ebaf6a1d02dee102205bd953c34faf5e7322e9a1c0103581cb090280fda4f1039ee8552668afa90ebb834730440220669de9ca7910eff65a7773ebd14a9fc371fe88cde5b8e2a81609d85c87ac939b02201ac29472fa4067322e92d75b624942d60be5050139b20bb363db75be79eb946f01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6851b27568f7010000" },
10000
10001                                   { 1,
10002                                   "3045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd7271",
10003                                   "3045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688",
10004                                   "02000000000101ac13a7715f80b8e52dda43c6929cade5521bdced3a405da02b443f1ffb1e33cc03000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100949e8dd938da56445b1cdfdebe1b7efea086edd05d89910d205a1e2e033ce47102202cbd68b5262ab144d9ec12653f87dfb0bb6bd05d1f58ae1e523f028eaefd727183483045022100e3104ed8b239f8019e5f0a1a73d7782a94a8c36e7984f476c3a0b3cb0e62e27902207e3d52884600985f8a2098e53a5c30dd6a5e857733acfaa07ab2162421ed2688012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10005                 } );
10006
10007                 // commitment tx with four outputs untrimmed (maximum feerate)
10008                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10009                 chan.context.feerate_per_kw = 3702;
10010                 chan.context.holder_dust_limit_satoshis = 546;
10011                 chan.context.channel_type = cached_channel_type.clone();
10012
10013                 test_commitment!("304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee40169",
10014                                  "3045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf75",
10015                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8004b80b000000000000220020c20b5d1f8584fd90443e7b7b720136174fa4b9333c261d04dbbd012635c0f419a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4846f916a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100e5efb73c32d32da2d79702299b6317de6fb24a60476e3855926d78484dd1b3c802203557cb66a42c944ef06e00bcc4da35a5bcb2f185aab0f8e403e519e1d66aaf750148304502210092a587aeb777f869e7ff0d7898ea619ee26a3dacd1f3672b945eea600be431100220077ee9eae3528d15251f2a52b607b189820e57a6ccfac8d1af502b132ee4016901475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10016
10017                                   { 0,
10018                                   "304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb89",
10019                                   "304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f",
10020                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402206fa54c11f98c3bae1e93df43fc7affeb05b476bf8060c03e29c377c69bc08e8b0220672701cce50d5c379ff45a5d2cfe48ac44973adb066ac32608e21221d869bb890147304402206e36c683ebf2cb16bcef3d5439cf8b53cd97280a365ed8acd7abb85a8ba5f21c02206e8621edfc2a5766cbc96eb67fd501127ff163eb6b85518a39f7d4974aef126f01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9148a486ff2e31d6158bf39e2608864d63fefd09d5b88ac6868f7010000" },
10021
10022                                   { 1,
10023                                   "3044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb5817",
10024                                   "304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc",
10025                                   "020000000001018db483bff65c70ee71d8282aeec5a880e2e2b39e45772bda5460403095c62e3f0100000000000000000176050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022057649739b0eb74d541ead0dfdb3d4b2c15aa192720031044c3434c67812e5ca902201e5ede42d960ae551707f4a6b34b09393cf4dee2418507daa022e3550dbb58170147304402207faad26678c8850e01b4a0696d60841f7305e1832b786110ee9075cb92ed14a30220516ef8ee5dfa80824ea28cbcec0dd95f8b847146257c16960db98507db15ffdc012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10026                 } );
10027
10028                 // commitment tx with three outputs untrimmed (minimum feerate)
10029                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10030                 chan.context.feerate_per_kw = 3703;
10031
10032                 test_commitment!("3045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e",
10033                                  "304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e1",
10034                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484eb936a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402201b736d1773a124c745586217a75bed5f66c05716fbe8c7db4fdb3c3069741cdd02205083f39c321c1bcadfc8d97e3c791a66273d936abac0c6a2fde2ed46019508e101483045022100b495d239772a237ff2cf354b1b11be152fd852704cb184e7356d13f2fb1e5e430220723db5cdb9cbd6ead7bfd3deb419cf41053a932418cbb22a67b581f40bc1f13e01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10035
10036                                   { 0,
10037                                   "3045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a",
10038                                   "3045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05",
10039                                   "0200000000010120060e4a29579d429f0f27c17ee5f1ee282f20d706d6f90b63d35946d8f3029a0000000000000000000175050000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100c34c61735f93f2e324cc873c3b248111ccf8f6db15d5969583757010d4ad2b4602207867bb919b2ddd6387873e425345c9b7fd18d1d66aba41f3607bc2896ef3c30a01483045022100988c143e2110067117d2321bdd4bd16ca1734c98b29290d129384af0962b634e02206c1b02478878c5f547018b833986578f90c3e9be669fe5788ad0072a55acbb05012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10040                 } );
10041
10042                 // anchors: commitment tx with three outputs untrimmed (minimum dust limit)
10043                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10044                 chan.context.feerate_per_kw = 3687;
10045                 chan.context.holder_dust_limit_satoshis = 3001;
10046                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10047
10048                 test_commitment_with_anchors!("3045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c228377",
10049                                  "3045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d",
10050                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80054a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994a00f000000000000220020ce6e751274836ff59622a0d1e07f8831d80bd6730bd48581398bfadd2bb8da9ac0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aa28b6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100c970799bcb33f43179eb43b3378a0a61991cf2923f69b36ef12548c3df0e6d500220413dc27d2e39ee583093adfcb7799be680141738babb31cc7b0669a777a31f5d01483045022100ad6c71569856b2d7ff42e838b4abe74a713426b37f22fa667a195a4c88908c6902202b37272b02a42dc6d9f4f82cab3eaf84ac882d9ed762859e1e75455c2c22837701475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10051
10052                                   { 0,
10053                                   "3044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c",
10054                                   "3045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de",
10055                                   "02000000000101542562b326c08e3a076d9cfca2be175041366591da334d8d513ff1686fd95a6002000000000100000001a00f0000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500473044022017b558a3cf5f0cb94269e2e927b29ed22bd2416abb8a7ce6de4d1256f359b93602202e9ca2b1a23ea3e69f433c704e327739e219804b8c188b1d52f74fd5a9de954c83483045022100af7a8b7c7ff2080c68995254cb66d64d9954edcc5baac3bb4f27ed2d29aaa6120220421c27da7a60574a9263f271e0f3bd34594ec6011095190022b3b54596ea03de012004040404040404040404040404040404040404040404040404040404040404048d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac6851b2756800000000" }
10056                 } );
10057
10058                 // commitment tx with three outputs untrimmed (maximum feerate)
10059                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10060                 chan.context.feerate_per_kw = 4914;
10061                 chan.context.holder_dust_limit_satoshis = 546;
10062                 chan.context.channel_type = cached_channel_type.clone();
10063
10064                 test_commitment!("3045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c95244",
10065                                  "3045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c19",
10066                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8003a00f0000000000002200208c48d15160397c9731df9bc3b236656efb6665fbfe92b4a6878e88a499f741c4c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484ae8f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100d72638bc6308b88bb6d45861aae83e5b9ff6e10986546e13bce769c70036e2620220320be7c6d66d22f30b9fcd52af66531505b1310ca3b848c19285b38d8a1a8c1901483045022100b4b16d5f8cc9fc4c1aff48831e832a0d8990e133978a66e302c133550954a44d022073573ce127e2200d316f6b612803a5c0c97b8d20e1e44dbe2ac0dd2fb8c9524401475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10067
10068                                   { 0,
10069                                   "3045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374",
10070                                   "30440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10",
10071                                   "02000000000101a9172908eace869cc35128c31fc2ab502f72e4dff31aab23e0244c4b04b11ab00000000000000000000122020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100f43591c156038ba217756006bb3c55f7d113a325cdd7d9303c82115372858d68022016355b5aadf222bc8d12e426c75f4a03423917b2443a103eb2a498a3a2234374014730440220585dee80fafa264beac535c3c0bb5838ac348b156fdc982f86adc08dfc9bfd250220130abb82f9f295cc9ef423dcfef772fde2acd85d9df48cc538981d26a10a9c10012004040404040404040404040404040404040404040404040404040404040404048a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a91418bc1a114ccf9c052d3d23e28d3b0a9d1227434288527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f801b175ac686800000000" }
10072                 } );
10073
10074                 // commitment tx with two outputs untrimmed (minimum feerate)
10075                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10076                 chan.context.feerate_per_kw = 4915;
10077                 chan.context.holder_dust_limit_satoshis = 546;
10078
10079                 test_commitment!("304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a720",
10080                                  "30450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf5",
10081                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8002c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484fa926a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221008a953551f4d67cb4df3037207fc082ddaf6be84d417b0bd14c80aab66f1b01a402207508796dc75034b2dee876fe01dc05a08b019f3e5d689ac8842ade2f1befccf50147304402203a286936e74870ca1459c700c71202af0381910a6bfab687ef494ef1bc3e02c902202506c362d0e3bee15e802aa729bf378e051644648253513f1c085b264cc2a72001475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10082
10083                 // anchors: commitment tx with two outputs untrimmed (minimum dust limit)
10084                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10085                 chan.context.feerate_per_kw = 4894;
10086                 chan.context.holder_dust_limit_satoshis = 4001;
10087                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10088
10089                 test_commitment_with_anchors!("3045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b95",
10090                                  "30450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd",
10091                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80044a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994ad0886a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e04004830450221009f16ac85d232e4eddb3fcd750a68ebf0b58e3356eaada45d3513ede7e817bf4c02207c2b043b4e5f971261975406cb955219fa56bffe5d834a833694b5abc1ce4cfd01483045022100e784a66b1588575801e237d35e510fd92a81ae3a4a2a1b90c031ad803d07b3f3022021bc5f16501f167607d63b681442da193eb0a76b4b7fd25c2ed4f8b28fd35b9501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10092
10093                 // commitment tx with two outputs untrimmed (maximum feerate)
10094                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10095                 chan.context.feerate_per_kw = 9651180;
10096                 chan.context.holder_dust_limit_satoshis = 546;
10097                 chan.context.channel_type = cached_channel_type.clone();
10098
10099                 test_commitment!("304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd3",
10100                                  "3045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de",
10101                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b800222020000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80ec0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e4840400483045022100e11b638c05c650c2f63a421d36ef8756c5ce82f2184278643520311cdf50aa200220259565fb9c8e4a87ccaf17f27a3b9ca4f20625754a0920d9c6c239d8156a11de0147304402200a8544eba1d216f5c5e530597665fa9bec56943c0f66d98fc3d028df52d84f7002201e45fa5c6bc3a506cc2553e7d1c0043a9811313fc39c954692c0d47cfce2bbd301475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10102
10103                 // commitment tx with one output untrimmed (minimum feerate)
10104                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10105                 chan.context.feerate_per_kw = 9651181;
10106
10107                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10108                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10109                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10110
10111                 // anchors: commitment tx with one output untrimmed (minimum dust limit)
10112                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10113                 chan.context.feerate_per_kw = 6216010;
10114                 chan.context.holder_dust_limit_satoshis = 4001;
10115                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10116
10117                 test_commitment_with_anchors!("30450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf",
10118                                  "30450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1",
10119                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80024a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994a04004830450221009ad80792e3038fe6968d12ff23e6888a565c3ddd065037f357445f01675d63f3022018384915e5f1f4ae157e15debf4f49b61c8d9d2b073c7d6f97c4a68caa3ed4c1014830450221008fd5dbff02e4b59020d4cd23a3c30d3e287065fda75a0a09b402980adf68ccda022001e0b8b620cd915ddff11f1de32addf23d81d51b90e6841b2cb8dcaf3faa5ecf01475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10120
10121                 // commitment tx with fee greater than funder amount
10122                 chan.context.value_to_self_msat = 6993000000; // 7000000000 - 7000000
10123                 chan.context.feerate_per_kw = 9651936;
10124                 chan.context.holder_dust_limit_satoshis = 546;
10125                 chan.context.channel_type = cached_channel_type;
10126
10127                 test_commitment!("304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a2",
10128                                  "304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a379",
10129                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8001c0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484040047304402207e8d51e0c570a5868a78414f4e0cbfaed1106b171b9581542c30718ee4eb95ba02203af84194c97adf98898c9afe2f2ed4a7f8dba05a2dfab28ac9d9c604aa49a3790147304402202ade0142008309eb376736575ad58d03e5b115499709c6db0b46e36ff394b492022037b63d78d66404d6504d4c4ac13be346f3d1802928a6d3ad95a6a944227161a201475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {});
10130
10131                 // commitment tx with 3 htlc outputs, 2 offered having the same amount and preimage
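		// Per BOLT 3, commitment outputs are sorted BIP 69-style, and two otherwise-identical
		// offered HTLC outputs (same amount and payment_hash, as below) are tiebroken by
		// ascending cltv_expiry, so this exercises the HTLC output-ordering edge case.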
10132                 chan.context.value_to_self_msat = 7_000_000_000 - 2_000_000;
10133                 chan.context.feerate_per_kw = 253;
10134                 chan.context.pending_inbound_htlcs.clear();
10135                 chan.context.pending_inbound_htlcs.push({
10136                         let mut out = InboundHTLCOutput{
10137                                 htlc_id: 1,
10138                                 amount_msat: 2000000,
10139                                 cltv_expiry: 501,
10140                                 payment_hash: PaymentHash([0; 32]),
10141                                 state: InboundHTLCState::Committed,
10142                         };
10143                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap()).to_byte_array();
10144                         out
10145                 });
10146                 chan.context.pending_outbound_htlcs.clear();
10147                 chan.context.pending_outbound_htlcs.push({
10148                         let mut out = OutboundHTLCOutput{
10149                                 htlc_id: 6,
10150                                 amount_msat: 5000001,
10151                                 cltv_expiry: 506,
10152                                 payment_hash: PaymentHash([0; 32]),
10153                                 state: OutboundHTLCState::Committed,
10154                                 source: HTLCSource::dummy(),
10155                                 skimmed_fee_msat: None,
10156                                 blinding_point: None,
10157                         };
10158                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
10159                         out
10160                 });
10161                 chan.context.pending_outbound_htlcs.push({
10162                         let mut out = OutboundHTLCOutput{
10163                                 htlc_id: 5,
10164                                 amount_msat: 5000000,
10165                                 cltv_expiry: 505,
10166                                 payment_hash: PaymentHash([0; 32]),
10167                                 state: OutboundHTLCState::Committed,
10168                                 source: HTLCSource::dummy(),
10169                                 skimmed_fee_msat: None,
10170                                 blinding_point: None,
10171                         };
10172                         out.payment_hash.0 = Sha256::hash(&<Vec<u8>>::from_hex("0505050505050505050505050505050505050505050505050505050505050505").unwrap()).to_byte_array();
10173                         out
10174                 });
10175
10176                 test_commitment!("304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c8",
10177                                  "304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c",
10178                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b8005d007000000000000220020748eba944fedc8827f6b06bc44678f93c0f9e6078b35c6331ed31e75f8ce0c2d8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121b8813000000000000220020305c12e1a0bc21e283c131cea1c66d68857d28b7b2fce0a6fbc40c164852121bc0c62d0000000000160014cc1b07838e387deacd0e5232e1e8b49f4c29e484a69f6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e040047304402200d10bf5bc5397fc59d7188ae438d80c77575595a2d488e41bd6363a810cc8d72022012b57e714fbbfdf7a28c47d5b370cb8ac37c8545f596216e5b21e9b236ef457c0147304402207d0870964530f97b62497b11153c551dca0a1e226815ef0a336651158da0f82402200f5378beee0e77759147b8a0a284decd11bfd2bc55c8fafa41c134fe996d43c801475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10179
10180                                   { 0,
10181                                   "3045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce5",
10182                                   "3044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b",
10183                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec000000000000000000011f070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b470fe12e5b7fea9eccb8cbff1972cea4f96758041898982a02bcc7f9d56d50b0220338a75b2afaab4ec00cdd2d9273c68c7581ff5a28bcbb40c4d138b81f1d45ce501473044022017b90c65207522a907fb6a137f9dd528b3389465a8ae72308d9e1d564f512cf402204fc917b4f0e88604a3e994f85bfae7c7c1f9d9e9f78e8cd112e0889720d9405b012001010101010101010101010101010101010101010101010101010101010101018a76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac686800000000" },
10184                                   { 1,
10185                                   "3045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a",
10186                                   "3045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d",
10187                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec01000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100b575379f6d8743cb0087648f81cfd82d17a97fbf8f67e058c65ce8b9d25df9500220554a210d65b02d9f36c6adf0f639430ca8293196ba5089bf67cc3a9813b7b00a01483045022100ee2e16b90930a479b13f8823a7f14b600198c838161160b9436ed086d3fc57e002202a66fa2324f342a17129949c640bfe934cbc73a869ba7c06aa25c5a3d0bfb53d01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868f9010000" },
10188                                   { 2,
10189                                   "30440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc",
10190                                   "304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb",
10191                                   "020000000001014bdccf28653066a2c554cafeffdfe1e678e64a69b056684deb0c4fba909423ec02000000000000000001e1120000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220471c9f3ad92e49b13b7b8059f43ecf8f7887b0dccbb9fdb54bfe23d62a8ae332022024bd22fae0740e86a44228c35330da9526fd7306dffb2b9dc362d5e78abef7cc0147304402207157f452f2506d73c315192311893800cfb3cc235cc1185b1cfcc136b55230db022014be242dbc6c5da141fec4034e7f387f74d6ff1899453d72ba957467540e1ecb01008576a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6868fa010000" }
10192                 } );
10193
10194                 chan.context.channel_type = ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies();
10195                 test_commitment_with_anchors!("3044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c725",
10196                                  "3045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b76",
10197                                  "02000000000101bef67e4e2fb9ddeeb3461973cd4c62abb35050b1add772995b820b584a488489000000000038b02b80074a010000000000002200202b1b5854183c12d3316565972c4668929d314d81c5dcdbb21cb45fe8a9a8114f4a01000000000000220020e9e86e4823faa62e222ebc858a226636856158f07e69898da3b0d1af0ddb3994d007000000000000220020fe0598d74fee2205cc3672e6e6647706b4f3099713b4661b62482c3addd04a5e881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7881300000000000022002018e40f9072c44350f134bdc887bab4d9bdfc8aa468a25616c80e21757ba5dac7c0c62d0000000000220020f3394e1e619b0eca1f91be2fb5ab4dfc59ba5b84ebe014ad1d43a564d012994aad9c6a00000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0400483045022100b4014970d9d7962853f3f85196144671d7d5d87426250f0a5fdaf9a55292e92502205360910c9abb397467e19dbd63d081deb4a3240903114c98cec0a23591b79b7601473044022027b38dfb654c34032ffb70bb43022981652fce923cbbe3cbe7394e2ade8b34230220584195b78da6e25c2e8da6b4308d9db25b65b64975db9266163ef592abb7c72501475221023da092f6980e58d2c037173180e9a465476026ee50f96695963e8efe436f54eb21030e9f7b623d2ccc7c9bd44d66d5ce21ce504c0acf6385a132cec6d3c39fa711c152ae3e195220", {
10198
10199                                   { 0,
10200                                   "30440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c2",
10201                                   "304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424",
10202                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c402000000000100000001d0070000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e05004730440220078fe5343dab88c348a3a8a9c1a9293259dbf35507ae971702cc39dd623ea9af022011ed0c0f35243cd0bb4d9ca3c772379b2b5f4af93140e9fdc5600dfec1cdb0c28347304402205df665e2908c7690d2d33eb70e6e119958c28febe141a94ed0dd9a55ce7c8cfc0220364d02663a5d019af35c5cd5fda9465d985d85bbd12db207738d61163449a424012001010101010101010101010101010101010101010101010101010101010101018d76a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c8201208763a9144b6b2e5444c2639cc0fb7bcea5afba3f3cdce23988527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae677502f501b175ac6851b2756800000000" },
10203                                   { 1,
10204                                   "304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c",
10205                                   "304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b",
10206                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40300000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e050047304402202df6bf0f98a42cfd0172a16bded7d1b16c14f5f42ba23f5c54648c14b647531302200fe1508626817f23925bb56951d5e4b2654c751743ab6db48a6cce7dda17c01c8347304402203f99ec05cdd89558a23683b471c1dcce8f6a92295f1fff3b0b5d21be4d4f97ea022019d29070690fc2c126fe27cc4ab2f503f289d362721b2efa7418e7fddb939a5b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568f9010000" },
10207                                   { 2,
10208                                   "3045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a1",
10209                                   "3045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b",
10210                                   "020000000001013d060d0305c9616eaabc21d41fae85bcb5477b5d7f1c92aa429cf15339bbe1c40400000000010000000188130000000000002200204adb4e2f00643db396dd120d4e7dc17625f5f2c11a40d857accc862d6b7dd80e0500483045022100bd206b420c495f3aa714d3ea4766cbe95441deacb5d2f737f1913349aee7c2ae02200249d2c950dd3b15326bf378ae5d2b871d33d6737f5d70735f3de8383140f2a183483045022100f2cd35e385b9b7e15b92a5d78d120b6b2c5af4e974bc01e884c5facb3bb5966c0220706e0506477ce809a40022d6de8e041e9ef13136c45abee9c36f58a01fdb188b01008876a91414011f7254d96b819c76986c277d115efce6f7b58763ac67210394854aa6eab5b2a8122cc726e9dded053a2184d88256816826d6231c068d4a5b7c820120876475527c21030d417a46946384f88d5f3337267c5e579765875dc4daca813e21734b140639e752ae67a9142002cc93ebefbb1b73f0af055dcc27a0b504ad7688ac6851b27568fa010000" }
10211                 } );
10212         }
10213
10214         #[test]
10215         fn test_per_commitment_secret_gen() {
10216                 // Test vectors from BOLT 3 Appendix D:
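		// Per BOLT 3, the secret for index I is derived from a 32-byte seed by walking the 48
		// bits of I from most- to least-significant: whenever a bit is set, flip that bit in
		// the running value and hash it. A sketch of the derivation implemented by
		// `chan_utils::build_commitment_secret`:
		//
		//     let mut res = seed;
		//     for bit in (0..48).rev() {
		//         if (idx >> bit) & 1 == 1 {
		//             res[bit / 8] ^= 1 << (bit & 7);
		//             res = Sha256::hash(&res).to_byte_array();
		//         }
		//     }
		//     res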
10217
10218                 let mut seed = [0; 32];
10219                 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0000000000000000000000000000000000000000000000000000000000000000").unwrap());
10220                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10221                            <Vec<u8>>::from_hex("02a40c85b6f28da08dfdbe0926c53fab2de6d28c10301f8f7c4073d5e42e3148").unwrap()[..]);
10222
10223                 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF").unwrap());
10224                 assert_eq!(chan_utils::build_commitment_secret(&seed, 281474976710655),
10225                            <Vec<u8>>::from_hex("7cc854b54e3e0dcdb010d7a3fee464a9687be6e8db3be6854c475621e007a5dc").unwrap()[..]);
10226
10227                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0xaaaaaaaaaaa),
10228                            <Vec<u8>>::from_hex("56f4008fb007ca9acf0e15b054d5c9fd12ee06cea347914ddbaed70d1c13a528").unwrap()[..]);
10229
10230                 assert_eq!(chan_utils::build_commitment_secret(&seed, 0x555555555555),
10231                            <Vec<u8>>::from_hex("9015daaeb06dba4ccc05b91b2f73bd54405f2be9f217fbacd3c5ac2e62327d31").unwrap()[..]);
10232
10233                 seed[0..32].clone_from_slice(&<Vec<u8>>::from_hex("0101010101010101010101010101010101010101010101010101010101010101").unwrap());
10234                 assert_eq!(chan_utils::build_commitment_secret(&seed, 1),
10235                            <Vec<u8>>::from_hex("915c75942a26bb3a433a8ce2cb0427c29ec6c1775cfc78328b57f6ba7bfeaa9c").unwrap()[..]);
10236         }
10237
10238         #[test]
10239         fn test_key_derivation() {
10240                 // Test vectors from BOLT 3 Appendix E:
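		// Per BOLT 3, non-revocation keys are tweaked from a basepoint and per-commitment point:
		//     pubkey  = basepoint + SHA256(per_commitment_point || basepoint) * G
		//     privkey = basepoint_secret + SHA256(per_commitment_point || basepoint)
		// while the revocation key commits to secrets from both parties:
		//     revocationpubkey = revocation_basepoint * SHA256(revocation_basepoint || per_commitment_point)
		//                        + per_commitment_point * SHA256(per_commitment_point || revocation_basepoint)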
10241                 let secp_ctx = Secp256k1::new();
10242
10243                 let base_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f").unwrap()[..]).unwrap();
10244                 let per_commitment_secret = SecretKey::from_slice(&<Vec<u8>>::from_hex("1f1e1d1c1b1a191817161514131211100f0e0d0c0b0a09080706050403020100").unwrap()[..]).unwrap();
10245
10246                 let base_point = PublicKey::from_secret_key(&secp_ctx, &base_secret);
10247                 assert_eq!(base_point.serialize()[..], <Vec<u8>>::from_hex("036d6caac248af96f6afa7f904f550253a0f3ef3f5aa2fe6838a95b216691468e2").unwrap()[..]);
10248
10249                 let per_commitment_point = PublicKey::from_secret_key(&secp_ctx, &per_commitment_secret);
10250                 assert_eq!(per_commitment_point.serialize()[..], <Vec<u8>>::from_hex("025f7117a78150fe2ef97db7cfc83bd57b2e2c0d0dd25eaf467a4a1c2a45ce1486").unwrap()[..]);
10251
10252                 assert_eq!(chan_utils::derive_private_key(&secp_ctx, &per_commitment_point, &base_secret),
10253                                 SecretKey::from_slice(&<Vec<u8>>::from_hex("cbced912d3b21bf196a766651e436aff192362621ce317704ea2f75d87e7be0f").unwrap()[..]).unwrap());
10254
10255                 assert_eq!(RevocationKey::from_basepoint(&secp_ctx, &RevocationBasepoint::from(base_point), &per_commitment_point).to_public_key().serialize()[..],
10256                                 <Vec<u8>>::from_hex("02916e326636d19c33f13e8c0c3a03dd157f332f3e99c317c141dd865eb01f8ff0").unwrap()[..]);
10257
10258                 assert_eq!(chan_utils::derive_private_revocation_key(&secp_ctx, &per_commitment_secret, &base_secret),
10259                                 SecretKey::from_slice(&<Vec<u8>>::from_hex("d09ffff62ddb2297ab000cc85bcb4283fdeb6aa052affbc9dddcf33b61078110").unwrap()[..]).unwrap());
10260         }
10261
10262         #[test]
10263         fn test_zero_conf_channel_type_support() {
10264                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10265                 let secp_ctx = Secp256k1::new();
10266                 let seed = [42; 32];
10267                 let network = Network::Testnet;
10268                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
10269                 let logger = test_utils::TestLogger::new();
10270
10271                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
10272                 let config = UserConfig::default();
10273                 let node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10274                         node_b_node_id, &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42, None).unwrap();
10275
10276                 let mut channel_type_features = ChannelTypeFeatures::only_static_remote_key();
10277                 channel_type_features.set_zero_conf_required();
10278
10279                 let mut open_channel_msg = node_a_chan.get_open_channel(ChainHash::using_genesis_block(network));
10280                 open_channel_msg.common_fields.channel_type = Some(channel_type_features);
10281                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
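		// Constructing the inbound channel should succeed: the zero-conf channel type itself is
		// supported, and whether to actually trust the unconfirmed funding is a separate policy
		// decision made above this layer.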
10282                 let res = InboundV1Channel::<&TestKeysInterface>::new(&feeest, &&keys_provider, &&keys_provider,
10283                         node_b_node_id, &channelmanager::provided_channel_type_features(&config),
10284                         &channelmanager::provided_init_features(&config), &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false);
10285                 assert!(res.is_ok());
10286         }
10287
10288         #[test]
10289         fn test_supports_anchors_zero_htlc_tx_fee() {
10290                 // Tests that if both sides support and negotiate `anchors_zero_fee_htlc_tx`, it is the
10291                 // resulting `channel_type`.
10292                 let secp_ctx = Secp256k1::new();
10293                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10294                 let network = Network::Testnet;
10295                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10296                 let logger = test_utils::TestLogger::new();
10297
10298                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10299                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10300
10301                 let mut config = UserConfig::default();
10302                 config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
10303
10304                 // It is not enough for just the initiator to signal `option_anchors_zero_fee_htlc_tx`, both
10305                 // need to signal it.
10306                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10307                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10308                         &channelmanager::provided_init_features(&UserConfig::default()), 10000000, 100000, 42,
10309                         &config, 0, 42, None
10310                 ).unwrap();
10311                 assert!(!channel_a.context.channel_type.supports_anchors_zero_fee_htlc_tx());
10312
10313                 let mut expected_channel_type = ChannelTypeFeatures::empty();
10314                 expected_channel_type.set_static_remote_key_required();
10315                 expected_channel_type.set_anchors_zero_fee_htlc_tx_required();
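		// `channel_type` features are always expressed as required (even) bits, hence the
		// `*_required` setters above.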
10316
10317                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10318                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10319                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
10320                         None
10321                 ).unwrap();
10322
10323                 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10324                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10325                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10326                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
10327                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10328                 ).unwrap();
10329
10330                 assert_eq!(channel_a.context.channel_type, expected_channel_type);
10331                 assert_eq!(channel_b.context.channel_type, expected_channel_type);
10332         }
10333
10334         #[test]
10335         fn test_rejects_implicit_simple_anchors() {
10336                 // Tests that if `option_anchors` is being negotiated implicitly through the intersection of
10337                 // each side's `InitFeatures`, it is rejected.
10338                 let secp_ctx = Secp256k1::new();
10339                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10340                 let network = Network::Testnet;
10341                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10342                 let logger = test_utils::TestLogger::new();
10343
10344                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10345                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10346
10347                 let config = UserConfig::default();
10348
10349                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
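		// Bit 12 is `option_static_remotekey` and bit 20 is the legacy `option_anchors`; using
		// the even bit of each pair marks the feature as required rather than optional.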
10350                 let static_remote_key_required: u64 = 1 << 12;
10351                 let simple_anchors_required: u64 = 1 << 20;
10352                 let raw_init_features = static_remote_key_required | simple_anchors_required;
10353                 let init_features_with_simple_anchors = InitFeatures::from_le_bytes(raw_init_features.to_le_bytes().to_vec());
10354
10355                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10356                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10357                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
10358                         None
10359                 ).unwrap();
10360
10361                 // Set `channel_type` to `None` to force the implicit feature negotiation.
10362                 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10363                 open_channel_msg.common_fields.channel_type = None;
10364
10365                 // Since A supports both `static_remote_key` and `option_anchors`, but B only accepts
10366                 // `static_remote_key`, it will fail the channel.
10367                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10368                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10369                         &channelmanager::provided_channel_type_features(&config), &init_features_with_simple_anchors,
10370                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10371                 );
10372                 assert!(channel_b.is_err());
10373         }
10374
10375         #[test]
10376         fn test_rejects_simple_anchors_channel_type() {
10377                 // Tests that if `option_anchors` is being negotiated through the `channel_type` feature,
10378                 // it is rejected.
10379                 let secp_ctx = Secp256k1::new();
10380                 let fee_estimator = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10381                 let network = Network::Testnet;
10382                 let keys_provider = test_utils::TestKeysInterface::new(&[42; 32], network);
10383                 let logger = test_utils::TestLogger::new();
10384
10385                 let node_id_a = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[1; 32]).unwrap());
10386                 let node_id_b = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[2; 32]).unwrap());
10387
10388                 let config = UserConfig::default();
10389
10390                 // See feature bit assignments: https://github.com/lightning/bolts/blob/master/09-features.md
10391                 let static_remote_key_required: u64 = 1 << 12;
10392                 let simple_anchors_required: u64 = 1 << 20;
10393                 let simple_anchors_raw_features = static_remote_key_required | simple_anchors_required;
10394                 let simple_anchors_init = InitFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
10395                 let simple_anchors_channel_type = ChannelTypeFeatures::from_le_bytes(simple_anchors_raw_features.to_le_bytes().to_vec());
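		// Both bits are defined in BOLT 9, so they parse as known features even though LDK
		// refuses the legacy anchors combination below.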
10396                 assert!(!simple_anchors_init.requires_unknown_bits());
10397                 assert!(!simple_anchors_channel_type.requires_unknown_bits());
10398
10399                 // First, we'll try to open a channel between A and B where A requests a channel type for
10400                 // the original `option_anchors` feature (non zero fee htlc tx). This should be rejected by
10401                 // B as it's not supported by LDK.
10402                 let channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10403                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b,
10404                         &channelmanager::provided_init_features(&config), 10000000, 100000, 42, &config, 0, 42,
10405                         None
10406                 ).unwrap();
10407
10408                 let mut open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10409                 open_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
10410
10411                 let res = InboundV1Channel::<&TestKeysInterface>::new(
10412                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10413                         &channelmanager::provided_channel_type_features(&config), &simple_anchors_init,
10414                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10415                 );
10416                 assert!(res.is_err());
10417
10418                 // Then, we'll try to open another channel where A requests a channel type for
10419                 // `anchors_zero_fee_htlc_tx`. B is malicious and tries to downgrade the channel type to the
10420                 // original `option_anchors` feature, which should be rejected by A as it's not supported by
10421                 // LDK.
10422                 let mut channel_a = OutboundV1Channel::<&TestKeysInterface>::new(
10423                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_b, &simple_anchors_init,
10424                         10000000, 100000, 42, &config, 0, 42, None
10425                 ).unwrap();
10426
10427                 let open_channel_msg = channel_a.get_open_channel(ChainHash::using_genesis_block(network));
10428
10429                 let channel_b = InboundV1Channel::<&TestKeysInterface>::new(
10430                         &fee_estimator, &&keys_provider, &&keys_provider, node_id_a,
10431                         &channelmanager::provided_channel_type_features(&config), &channelmanager::provided_init_features(&config),
10432                         &open_channel_msg, 7, &config, 0, &&logger, /*is_0conf=*/false
10433                 ).unwrap();
10434
10435                 let mut accept_channel_msg = channel_b.get_accept_channel_message();
10436                 accept_channel_msg.common_fields.channel_type = Some(simple_anchors_channel_type.clone());
10437
10438                 let res = channel_a.accept_channel(
10439                         &accept_channel_msg, &config.channel_handshake_limits, &simple_anchors_init
10440                 );
10441                 assert!(res.is_err());
10442         }
10443
10444         #[test]
10445         fn test_waiting_for_batch() {
10446                 let feeest = LowerBoundedFeeEstimator::new(&TestFeeEstimator{fee_est: 15000});
10447                 let logger = test_utils::TestLogger::new();
10448                 let secp_ctx = Secp256k1::new();
10449                 let seed = [42; 32];
10450                 let network = Network::Testnet;
10451                 let best_block = BestBlock::from_network(network);
10452                 let chain_hash = ChainHash::using_genesis_block(network);
10453                 let keys_provider = test_utils::TestKeysInterface::new(&seed, network);
10454
10455                 let mut config = UserConfig::default();
10456                 // Set trust_own_funding_0conf while ensuring we don't send channel_ready for a
10457                 // channel in a batch before all channels are ready.
10458                 config.channel_handshake_limits.trust_own_funding_0conf = true;
10459
10460                 // Create a channel from node a to node b that will be part of batch funding.
10461                 let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[42; 32]).unwrap());
10462                 let mut node_a_chan = OutboundV1Channel::<&TestKeysInterface>::new(
10463                         &feeest,
10464                         &&keys_provider,
10465                         &&keys_provider,
10466                         node_b_node_id,
10467                         &channelmanager::provided_init_features(&config),
10468                         10000000,
10469                         100000,
10470                         42,
10471                         &config,
10472                         0,
10473                         42,
10474                         None
10475                 ).unwrap();
10476
		let open_channel_msg = node_a_chan.get_open_channel(chain_hash);
		let node_b_node_id = PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[7; 32]).unwrap());
		let mut node_b_chan = InboundV1Channel::<&TestKeysInterface>::new(
			&feeest,
			&&keys_provider,
			&&keys_provider,
			node_b_node_id,
			&channelmanager::provided_channel_type_features(&config),
			&channelmanager::provided_init_features(&config),
			&open_channel_msg,
			7,
			&config,
			0,
			&&logger,
			true, // Allow node b to send a 0conf channel_ready.
		).unwrap();

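		// Complete the handshake: node B accepts the channel and node A processes the
		// resulting accept_channel.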
		let accept_channel_msg = node_b_chan.accept_inbound_channel();
		node_a_chan.accept_channel(
			&accept_channel_msg,
			&config.channel_handshake_limits,
			&channelmanager::provided_init_features(&config),
		).unwrap();

		// Fund the channel with a batch funding transaction.
		let output_script = node_a_chan.context.get_funding_redeemscript();
		let tx = Transaction {
			version: 1,
			lock_time: LockTime::ZERO,
			input: Vec::new(),
			output: vec![
				TxOut {
					value: 10000000, script_pubkey: output_script.clone(),
				},
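				// A second output, paying to an unrelated script, stands in for another
				// channel's funding output in the same batch.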
				TxOut {
					value: 10000000, script_pubkey: Builder::new().into_script(),
				},
			]};
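		// This channel's funding output is output 0 of the batch transaction.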
		let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 };
		let funding_created_msg = node_a_chan.get_funding_created(
			tx.clone(), funding_outpoint, /*is_batch_funding=*/true, &&logger,
		).map_err(|_| ()).unwrap();
		let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(
			&funding_created_msg.unwrap(),
			best_block,
			&&keys_provider,
			&&logger,
		).map_err(|_| ()).unwrap();
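		// Pretend node B's pending monitor update completed so it releases its outbound
		// messages, including its 0conf channel_ready.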
		let node_b_updates = node_b_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);

		// Receive funding_signed, but since this channel is part of a batch it is configured
		// to hold off on sending channel_ready and broadcasting the funding transaction
		// until the whole batch is ready.
		let (mut node_a_chan, _) = node_a_chan.funding_signed(
			&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger,
		).map_err(|_| ()).unwrap();
		let node_a_updates = node_a_chan.monitor_updating_restored(
			&&logger,
			&&keys_provider,
			chain_hash,
			&config,
			0,
		);
		// Our channel_ready shouldn't be sent yet, even with trust_own_funding_0conf set,
		// as the funding transaction depends on all channels in the batch becoming ready.
		assert!(node_a_updates.channel_ready.is_none());
		assert!(node_a_updates.funding_broadcastable.is_none());
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH));

		// It is possible to receive a 0conf channel_ready from the remote node.
		node_a_chan.channel_ready(
			&node_b_updates.channel_ready.unwrap(),
			&&keys_provider,
			chain_hash,
			&config,
			&best_block,
			&&logger,
		).unwrap();
		assert_eq!(
			node_a_chan.context.channel_state,
			ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH | AwaitingChannelReadyFlags::THEIR_CHANNEL_READY)
		);

		// The WAITING_FOR_BATCH flag is only cleared by set_batch_ready, which the
		// ChannelManager calls once every channel in the batch is ready.
		node_a_chan.set_batch_ready();
		assert_eq!(node_a_chan.context.channel_state, ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::THEIR_CHANNEL_READY));
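		// With the batch flag cleared, node A is finally able to produce its own channel_ready.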
		assert!(node_a_chan.check_get_channel_ready(0).is_some());
	}
}